2 * Copyright (C) 2009, 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
32 #include "AssemblerBufferWithConstantPool.h"
33 #include "JITCompilationEffort.h"
34 #include <wtf/Assertions.h>
// ARMWord: fundamental 32-bit unit used for encoded instructions and immediates.
37 typedef uint32_t ARMWord
;
39 namespace ARMRegisters
{
94 } // namespace ARMRegisters
// Register identifier types re-exported from the ARMRegisters namespace.
98 typedef ARMRegisters::RegisterID RegisterID
;
99 typedef ARMRegisters::FPRegisterID FPRegisterID
;
// Output buffer with an interleaved constant pool, parameterized on
// pool size (2048), instruction size (4) and pool-entry size (4).
100 typedef AssemblerBufferWithConstantPool
<2048, 4, 4, ARMAssembler
> ARMBuffer
;
// Holds code offsets of emitted jumps so they can be linked later.
101 typedef SegmentedVector
<AssemblerLabel
, 64> Jumps
;
105 // ARM conditional constants
107 EQ
= 0x00000000, // Zero
108 NE
= 0x10000000, // Non-zero
124 // ARM instruction constants
144 VADD_F64
= 0x0e300b00,
145 VDIV_F64
= 0x0e800b00,
146 VSUB_F64
= 0x0e300b40,
147 VMUL_F64
= 0x0e200b00,
148 VCMP_F64
= 0x0eb40b40,
149 VSQRT_F64
= 0x0eb10bc0,
158 #if WTF_ARM_ARCH_AT_LEAST(5) || defined(__ARM_ARCH_4T__)
161 VMOV_VFP
= 0x0e000a10,
162 VMOV_ARM
= 0x0e100a10,
163 VCVT_F64_S32
= 0x0eb80bc0,
164 VCVT_S32_F64
= 0x0ebd0b40,
165 VMRS_APSR
= 0x0ef1fa10,
166 #if WTF_ARM_ARCH_AT_LEAST(5)
172 #if WTF_ARM_ARCH_AT_LEAST(7)
181 OP2_IMMh
= (1 << 22),
182 OP2_INV_IMM
= (1 << 26),
184 OP2_OFSREG
= (1 << 25),
188 // This flag is inlcuded in LDR and STR
194 // Masks of ARM instructions
196 BRANCH_MASK
= 0x00ffffff,
198 SDT_MASK
= 0x0c000000,
199 SDT_OFFSET_MASK
= 0xfff,
203 BOFFSET_MIN
= -0x00800000,
204 BOFFSET_MAX
= 0x007fffff,
210 padForAlign16
= 0x0000,
211 padForAlign32
= 0xe12fff7f // 'bkpt 0xffff' instruction.
// Sentinel returned when a value has no valid ARM operand-2 immediate encoding.
214 static const ARMWord INVALID_IMM
= 0xf0000000;
// Placeholder branch target emitted before the real target is linked in.
215 static const ARMWord InvalidBranchTarget
= 0xffffffff;
// Word offset added to an instruction's address when computing PC-relative
// addresses (see getLdrImmAddress).
216 static const int DefaultPrefetching
= 2;
218 // Instruction formatting
220 void emitInst(ARMWord op
, int rd
, int rn
, ARMWord op2
)
222 ASSERT(((op2
& ~OP2_IMM
) <= 0xfff) || (((op2
& ~OP2_IMMh
) <= 0xfff)));
223 m_buffer
.putInt(op
| RN(rn
) | RD(rd
) | op2
);
226 void emitDoublePrecisionInst(ARMWord op
, int dd
, int dn
, int dm
)
228 ASSERT((dd
>= 0 && dd
<= 31) && (dn
>= 0 && dn
<= 31) && (dm
>= 0 && dm
<= 31));
229 m_buffer
.putInt(op
| ((dd
& 0xf) << 12) | ((dd
& 0x10) << (22 - 4))
230 | ((dn
& 0xf) << 16) | ((dn
& 0x10) << (7 - 4))
231 | (dm
& 0xf) | ((dm
& 0x10) << (5 - 4)));
234 void emitSinglePrecisionInst(ARMWord op
, int sd
, int sn
, int sm
)
236 ASSERT((sd
>= 0 && sd
<= 31) && (sn
>= 0 && sn
<= 31) && (sm
>= 0 && sm
<= 31));
237 m_buffer
.putInt(op
| ((sd
>> 1) << 12) | ((sd
& 0x1) << 22)
238 | ((sn
>> 1) << 16) | ((sn
& 0x1) << 7)
239 | (sm
>> 1) | ((sm
& 0x1) << 5));
242 void and_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
244 emitInst(static_cast<ARMWord
>(cc
) | AND
, rd
, rn
, op2
);
247 void ands_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
249 emitInst(static_cast<ARMWord
>(cc
) | AND
| SET_CC
, rd
, rn
, op2
);
252 void eor_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
254 emitInst(static_cast<ARMWord
>(cc
) | EOR
, rd
, rn
, op2
);
257 void eors_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
259 emitInst(static_cast<ARMWord
>(cc
) | EOR
| SET_CC
, rd
, rn
, op2
);
262 void sub_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
264 emitInst(static_cast<ARMWord
>(cc
) | SUB
, rd
, rn
, op2
);
267 void subs_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
269 emitInst(static_cast<ARMWord
>(cc
) | SUB
| SET_CC
, rd
, rn
, op2
);
272 void rsb_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
274 emitInst(static_cast<ARMWord
>(cc
) | RSB
, rd
, rn
, op2
);
277 void rsbs_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
279 emitInst(static_cast<ARMWord
>(cc
) | RSB
| SET_CC
, rd
, rn
, op2
);
282 void add_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
284 emitInst(static_cast<ARMWord
>(cc
) | ADD
, rd
, rn
, op2
);
287 void adds_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
289 emitInst(static_cast<ARMWord
>(cc
) | ADD
| SET_CC
, rd
, rn
, op2
);
292 void adc_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
294 emitInst(static_cast<ARMWord
>(cc
) | ADC
, rd
, rn
, op2
);
297 void adcs_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
299 emitInst(static_cast<ARMWord
>(cc
) | ADC
| SET_CC
, rd
, rn
, op2
);
302 void sbc_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
304 emitInst(static_cast<ARMWord
>(cc
) | SBC
, rd
, rn
, op2
);
307 void sbcs_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
309 emitInst(static_cast<ARMWord
>(cc
) | SBC
| SET_CC
, rd
, rn
, op2
);
312 void rsc_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
314 emitInst(static_cast<ARMWord
>(cc
) | RSC
, rd
, rn
, op2
);
317 void rscs_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
319 emitInst(static_cast<ARMWord
>(cc
) | RSC
| SET_CC
, rd
, rn
, op2
);
322 void tst_r(int rn
, ARMWord op2
, Condition cc
= AL
)
324 emitInst(static_cast<ARMWord
>(cc
) | TST
| SET_CC
, 0, rn
, op2
);
327 void teq_r(int rn
, ARMWord op2
, Condition cc
= AL
)
329 emitInst(static_cast<ARMWord
>(cc
) | TEQ
| SET_CC
, 0, rn
, op2
);
332 void cmp_r(int rn
, ARMWord op2
, Condition cc
= AL
)
334 emitInst(static_cast<ARMWord
>(cc
) | CMP
| SET_CC
, 0, rn
, op2
);
337 void cmn_r(int rn
, ARMWord op2
, Condition cc
= AL
)
339 emitInst(static_cast<ARMWord
>(cc
) | CMN
| SET_CC
, 0, rn
, op2
);
342 void orr_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
344 emitInst(static_cast<ARMWord
>(cc
) | ORR
, rd
, rn
, op2
);
347 void orrs_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
349 emitInst(static_cast<ARMWord
>(cc
) | ORR
| SET_CC
, rd
, rn
, op2
);
352 void mov_r(int rd
, ARMWord op2
, Condition cc
= AL
)
354 emitInst(static_cast<ARMWord
>(cc
) | MOV
, rd
, ARMRegisters::r0
, op2
);
357 #if WTF_ARM_ARCH_AT_LEAST(7)
358 void movw_r(int rd
, ARMWord op2
, Condition cc
= AL
)
360 ASSERT((op2
| 0xf0fff) == 0xf0fff);
361 m_buffer
.putInt(static_cast<ARMWord
>(cc
) | MOVW
| RD(rd
) | op2
);
364 void movt_r(int rd
, ARMWord op2
, Condition cc
= AL
)
366 ASSERT((op2
| 0xf0fff) == 0xf0fff);
367 m_buffer
.putInt(static_cast<ARMWord
>(cc
) | MOVT
| RD(rd
) | op2
);
371 void movs_r(int rd
, ARMWord op2
, Condition cc
= AL
)
373 emitInst(static_cast<ARMWord
>(cc
) | MOV
| SET_CC
, rd
, ARMRegisters::r0
, op2
);
376 void bic_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
378 emitInst(static_cast<ARMWord
>(cc
) | BIC
, rd
, rn
, op2
);
381 void bics_r(int rd
, int rn
, ARMWord op2
, Condition cc
= AL
)
383 emitInst(static_cast<ARMWord
>(cc
) | BIC
| SET_CC
, rd
, rn
, op2
);
386 void mvn_r(int rd
, ARMWord op2
, Condition cc
= AL
)
388 emitInst(static_cast<ARMWord
>(cc
) | MVN
, rd
, ARMRegisters::r0
, op2
);
391 void mvns_r(int rd
, ARMWord op2
, Condition cc
= AL
)
393 emitInst(static_cast<ARMWord
>(cc
) | MVN
| SET_CC
, rd
, ARMRegisters::r0
, op2
);
396 void mul_r(int rd
, int rn
, int rm
, Condition cc
= AL
)
398 m_buffer
.putInt(static_cast<ARMWord
>(cc
) | MUL
| RN(rd
) | RS(rn
) | RM(rm
));
401 void muls_r(int rd
, int rn
, int rm
, Condition cc
= AL
)
403 m_buffer
.putInt(static_cast<ARMWord
>(cc
) | MUL
| SET_CC
| RN(rd
) | RS(rn
) | RM(rm
));
406 void mull_r(int rdhi
, int rdlo
, int rn
, int rm
, Condition cc
= AL
)
408 m_buffer
.putInt(static_cast<ARMWord
>(cc
) | MULL
| RN(rdhi
) | RD(rdlo
) | RS(rn
) | RM(rm
));
411 void vadd_f64_r(int dd
, int dn
, int dm
, Condition cc
= AL
)
413 emitDoublePrecisionInst(static_cast<ARMWord
>(cc
) | VADD_F64
, dd
, dn
, dm
);
416 void vdiv_f64_r(int dd
, int dn
, int dm
, Condition cc
= AL
)
418 emitDoublePrecisionInst(static_cast<ARMWord
>(cc
) | VDIV_F64
, dd
, dn
, dm
);
421 void vsub_f64_r(int dd
, int dn
, int dm
, Condition cc
= AL
)
423 emitDoublePrecisionInst(static_cast<ARMWord
>(cc
) | VSUB_F64
, dd
, dn
, dm
);
426 void vmul_f64_r(int dd
, int dn
, int dm
, Condition cc
= AL
)
428 emitDoublePrecisionInst(static_cast<ARMWord
>(cc
) | VMUL_F64
, dd
, dn
, dm
);
431 void vcmp_f64_r(int dd
, int dm
, Condition cc
= AL
)
433 emitDoublePrecisionInst(static_cast<ARMWord
>(cc
) | VCMP_F64
, dd
, 0, dm
);
436 void vsqrt_f64_r(int dd
, int dm
, Condition cc
= AL
)
438 emitDoublePrecisionInst(static_cast<ARMWord
>(cc
) | VSQRT_F64
, dd
, 0, dm
);
441 void ldr_imm(int rd
, ARMWord imm
, Condition cc
= AL
)
443 m_buffer
.putIntWithConstantInt(static_cast<ARMWord
>(cc
) | DTR
| DT_LOAD
| DT_UP
| RN(ARMRegisters::pc
) | RD(rd
), imm
, true);
446 void ldr_un_imm(int rd
, ARMWord imm
, Condition cc
= AL
)
448 m_buffer
.putIntWithConstantInt(static_cast<ARMWord
>(cc
) | DTR
| DT_LOAD
| DT_UP
| RN(ARMRegisters::pc
) | RD(rd
), imm
);
451 void dtr_u(bool isLoad
, int rd
, int rb
, ARMWord op2
, Condition cc
= AL
)
453 emitInst(static_cast<ARMWord
>(cc
) | DTR
| (isLoad
? DT_LOAD
: 0) | DT_UP
, rd
, rb
, op2
);
456 void dtr_ur(bool isLoad
, int rd
, int rb
, int rm
, Condition cc
= AL
)
458 emitInst(static_cast<ARMWord
>(cc
) | DTR
| (isLoad
? DT_LOAD
: 0) | DT_UP
| OP2_OFSREG
, rd
, rb
, rm
);
461 void dtr_d(bool isLoad
, int rd
, int rb
, ARMWord op2
, Condition cc
= AL
)
463 emitInst(static_cast<ARMWord
>(cc
) | DTR
| (isLoad
? DT_LOAD
: 0), rd
, rb
, op2
);
466 void dtr_dr(bool isLoad
, int rd
, int rb
, int rm
, Condition cc
= AL
)
468 emitInst(static_cast<ARMWord
>(cc
) | DTR
| (isLoad
? DT_LOAD
: 0) | OP2_OFSREG
, rd
, rb
, rm
);
471 void ldrh_r(int rd
, int rn
, int rm
, Condition cc
= AL
)
473 emitInst(static_cast<ARMWord
>(cc
) | LDRH
| HDT_UH
| DT_UP
| DT_PRE
, rd
, rn
, rm
);
476 void ldrh_d(int rd
, int rb
, ARMWord op2
, Condition cc
= AL
)
478 emitInst(static_cast<ARMWord
>(cc
) | LDRH
| HDT_UH
| DT_PRE
, rd
, rb
, op2
);
481 void ldrh_u(int rd
, int rb
, ARMWord op2
, Condition cc
= AL
)
483 emitInst(static_cast<ARMWord
>(cc
) | LDRH
| HDT_UH
| DT_UP
| DT_PRE
, rd
, rb
, op2
);
486 void strh_r(int rn
, int rm
, int rd
, Condition cc
= AL
)
488 emitInst(static_cast<ARMWord
>(cc
) | STRH
| HDT_UH
| DT_UP
| DT_PRE
, rd
, rn
, rm
);
491 void fdtr_u(bool isLoad
, int rd
, int rb
, ARMWord op2
, Condition cc
= AL
)
494 emitInst(static_cast<ARMWord
>(cc
) | FDTR
| DT_UP
| (isLoad
? DT_LOAD
: 0), rd
, rb
, op2
);
497 void fdtr_d(bool isLoad
, int rd
, int rb
, ARMWord op2
, Condition cc
= AL
)
500 emitInst(static_cast<ARMWord
>(cc
) | FDTR
| (isLoad
? DT_LOAD
: 0), rd
, rb
, op2
);
503 void push_r(int reg
, Condition cc
= AL
)
505 ASSERT(ARMWord(reg
) <= 0xf);
506 m_buffer
.putInt(cc
| DTR
| DT_WB
| RN(ARMRegisters::sp
) | RD(reg
) | 0x4);
509 void pop_r(int reg
, Condition cc
= AL
)
511 ASSERT(ARMWord(reg
) <= 0xf);
512 m_buffer
.putInt(cc
| (DTR
^ DT_PRE
) | DT_LOAD
| DT_UP
| RN(ARMRegisters::sp
) | RD(reg
) | 0x4);
515 inline void poke_r(int reg
, Condition cc
= AL
)
517 dtr_d(false, ARMRegisters::sp
, 0, reg
, cc
);
520 inline void peek_r(int reg
, Condition cc
= AL
)
522 dtr_u(true, reg
, ARMRegisters::sp
, 0, cc
);
525 void vmov_vfp_r(int sn
, int rt
, Condition cc
= AL
)
528 emitSinglePrecisionInst(static_cast<ARMWord
>(cc
) | VMOV_VFP
, rt
<< 1, sn
, 0);
531 void vmov_arm_r(int rt
, int sn
, Condition cc
= AL
)
534 emitSinglePrecisionInst(static_cast<ARMWord
>(cc
) | VMOV_ARM
, rt
<< 1, sn
, 0);
537 void vcvt_f64_s32_r(int dd
, int sm
, Condition cc
= AL
)
539 ASSERT(!(sm
& 0x1)); // sm must be divisible by 2
540 emitDoublePrecisionInst(static_cast<ARMWord
>(cc
) | VCVT_F64_S32
, dd
, 0, (sm
>> 1));
543 void vcvt_s32_f64_r(int sd
, int dm
, Condition cc
= AL
)
545 ASSERT(!(sd
& 0x1)); // sd must be divisible by 2
546 emitDoublePrecisionInst(static_cast<ARMWord
>(cc
) | VCVT_S32_F64
, (sd
>> 1), 0, dm
);
549 void vmrs_apsr(Condition cc
= AL
)
551 m_buffer
.putInt(static_cast<ARMWord
>(cc
) | VMRS_APSR
);
554 #if WTF_ARM_ARCH_AT_LEAST(5)
555 void clz_r(int rd
, int rm
, Condition cc
= AL
)
557 m_buffer
.putInt(static_cast<ARMWord
>(cc
) | CLZ
| RD(rd
) | RM(rm
));
// Emit a breakpoint. On ARMv5+ this is a real BKPT, with 'value' split across
// the instruction's two immediate fields (bits 8-19 and 0-3); on older cores
// the fallback is a load from address zero, which faults.
// NOTE(review): the #else/#endif lines and the nop() declaration (original
// lines ~565-577) are not visible in this view — do not assume structure
// beyond what is shown.
561 void bkpt(ARMWord value
)
563 #if WTF_ARM_ARCH_AT_LEAST(5)
564 m_buffer
.putInt(BKPT
| ((value
& 0xff0) << 4) | (value
& 0xf));
566 // Cannot access to Zero memory address
567 dtr_dr(true, ARMRegisters::S0
, ARMRegisters::S0
, ARMRegisters::S0
);
// NOTE(review): the lines below appear to belong to a nop() emitter selecting
// a Thumb-2 (OP_NOP_T2) or ARM (NOP) encoding — its declaration and the
// surrounding preprocessor branches are not visible here; confirm in the
// original file before editing.
573 m_buffer
.putInt(OP_NOP_T2
);
578 m_buffer
.putInt(NOP
);
// Branch to the address in rm: BX where the architecture supports it
// (ARMv5+ or ARMv4T), otherwise "mov pc, rm".
// NOTE(review): the #else/#endif lines between the two emit paths are not
// visible in this view.
581 void bx(int rm
, Condition cc
= AL
)
583 #if WTF_ARM_ARCH_AT_LEAST(5) || defined(__ARM_ARCH_4T__)
584 emitInst(static_cast<ARMWord
>(cc
) | BX
, 0, 0, RM(rm
));
586 mov_r(ARMRegisters::pc
, RM(rm
), cc
);
// Branch-with-link to the address in rm, returning a label at the point
// after the call. On ARMv5+ this is a single BLX; otherwise lr is loaded
// manually before the branch.
// NOTE(review): the #else line and the branch emitted after "mov lr, pc"
// (original lines ~595, 598-599) are not visible in this view.
590 AssemblerLabel
blx(int rm
, Condition cc
= AL
)
592 #if WTF_ARM_ARCH_AT_LEAST(5)
593 emitInst(static_cast<ARMWord
>(cc
) | BLX
, 0, 0, RM(rm
));
596 ensureSpace(2 * sizeof(ARMWord
), 0);
597 mov_r(ARMRegisters::lr
, ARMRegisters::pc
, cc
);
600 return m_buffer
.label();
603 static ARMWord
lsl(int reg
, ARMWord value
)
605 ASSERT(reg
<= ARMRegisters::pc
);
606 ASSERT(value
<= 0x1f);
607 return reg
| (value
<< 7) | 0x00;
610 static ARMWord
lsr(int reg
, ARMWord value
)
612 ASSERT(reg
<= ARMRegisters::pc
);
613 ASSERT(value
<= 0x1f);
614 return reg
| (value
<< 7) | 0x20;
617 static ARMWord
asr(int reg
, ARMWord value
)
619 ASSERT(reg
<= ARMRegisters::pc
);
620 ASSERT(value
<= 0x1f);
621 return reg
| (value
<< 7) | 0x40;
624 static ARMWord
lsl_r(int reg
, int shiftReg
)
626 ASSERT(reg
<= ARMRegisters::pc
);
627 ASSERT(shiftReg
<= ARMRegisters::pc
);
628 return reg
| (shiftReg
<< 8) | 0x10;
631 static ARMWord
lsr_r(int reg
, int shiftReg
)
633 ASSERT(reg
<= ARMRegisters::pc
);
634 ASSERT(shiftReg
<= ARMRegisters::pc
);
635 return reg
| (shiftReg
<< 8) | 0x30;
638 static ARMWord
asr_r(int reg
, int shiftReg
)
640 ASSERT(reg
<= ARMRegisters::pc
);
641 ASSERT(shiftReg
<= ARMRegisters::pc
);
642 return reg
| (shiftReg
<< 8) | 0x50;
647 size_t codeSize() const
649 return m_buffer
.codeSize();
652 void ensureSpace(int insnSpace
, int constSpace
)
654 m_buffer
.ensureSpace(insnSpace
, constSpace
);
657 int sizeOfConstantPool()
659 return m_buffer
.sizeOfConstantPool();
662 AssemblerLabel
label()
664 m_buffer
.ensureSpaceForAnyOneInstruction();
665 return m_buffer
.label();
// Pad with "mov r0, r0" no-ops until the buffer offset satisfies 'alignment'.
// NOTE(review): the function's trailing return statement is not visible in
// this view.
668 AssemblerLabel
align(int alignment
)
670 while (!m_buffer
.isAligned(alignment
))
671 mov_r(ARMRegisters::r0
, ARMRegisters::r0
);
676 AssemblerLabel
loadBranchTarget(int rd
, Condition cc
= AL
, int useConstantPool
= 0)
678 ensureSpace(sizeof(ARMWord
), sizeof(ARMWord
));
679 m_jumps
.append(m_buffer
.codeSize() | (useConstantPool
& 0x1));
680 ldr_un_imm(rd
, InvalidBranchTarget
, cc
);
681 return m_buffer
.label();
684 AssemblerLabel
jmp(Condition cc
= AL
, int useConstantPool
= 0)
686 return loadBranchTarget(ARMRegisters::pc
, cc
, useConstantPool
);
// Copy the finished code into executable memory; implemented out of line.
689 PassRefPtr
<ExecutableMemoryHandle
> executableCopy(JSGlobalData
&, void* ownerUID
, JITCompilationEffort
);
// Current buffer offset, exposed for debugging.
691 unsigned debugOffset() { return m_buffer
.debugOffset(); }
// Given a pointer to an "ldr rX, [pc, #imm]" instruction (or, on ARMv5+, a
// BLX immediately preceded by one), return the address of the literal slot
// the load reads from. PC reads as the instruction address plus
// DefaultPrefetching words.
// NOTE(review): several lines are elided in this view — the body of the BLX
// branch (presumably stepping insn back by one, original ~702-703), the
// closing #endif, and the DT_UP test that selects between the +offset and
// -offset returns (original ~709). Confirm against the original file.
695 static ARMWord
* getLdrImmAddress(ARMWord
* insn
)
697 #if WTF_ARM_ARCH_AT_LEAST(5)
699 if ((*insn
& 0x0f7f0000) != 0x051f0000) {
701 ASSERT((*insn
& 0x012fff30) == 0x012fff30);
705 // Must be an ldr ..., [pc +/- imm]
706 ASSERT((*insn
& 0x0f7f0000) == 0x051f0000);
708 ARMWord addr
= reinterpret_cast<ARMWord
>(insn
) + DefaultPrefetching
* sizeof(ARMWord
);
710 return reinterpret_cast<ARMWord
*>(addr
+ (*insn
& SDT_OFFSET_MASK
));
711 return reinterpret_cast<ARMWord
*>(addr
- (*insn
& SDT_OFFSET_MASK
));
// Like getLdrImmAddress, but while the constant pool is still held
// separately: offsets tagged in their low bit resolve into 'constPool'
// (halved, since the tag doubled them); otherwise defer to getLdrImmAddress.
// NOTE(review): the guard selecting between the two returns (presumably a
// test of the instruction's low bit, original ~719) is not visible here.
714 static ARMWord
* getLdrImmAddressOnPool(ARMWord
* insn
, uint32_t* constPool
)
716 // Must be an ldr ..., [pc +/- imm]
717 ASSERT((*insn
& 0x0f7f0000) == 0x051f0000);
720 return reinterpret_cast<ARMWord
*>(constPool
+ ((*insn
& SDT_OFFSET_MASK
) >> 1));
721 return getLdrImmAddress(insn
);
724 static void patchPointerInternal(intptr_t from
, void* to
)
726 ARMWord
* insn
= reinterpret_cast<ARMWord
*>(from
);
727 ARMWord
* addr
= getLdrImmAddress(insn
);
728 *addr
= reinterpret_cast<ARMWord
>(to
);
731 static ARMWord
patchConstantPoolLoad(ARMWord load
, ARMWord value
)
733 value
= (value
<< 1) + 1;
734 ASSERT(!(value
& ~0xfff));
735 return (load
& ~0xfff) | value
;
738 static void patchConstantPoolLoad(void* loadAddr
, void* constPoolAddr
);
741 static void* readPointer(void* from
)
743 ARMWord
* insn
= reinterpret_cast<ARMWord
*>(from
);
744 ARMWord
* addr
= getLdrImmAddress(insn
);
745 return *reinterpret_cast<void**>(addr
);
750 static void linkPointer(void* code
, AssemblerLabel from
, void* to
)
752 patchPointerInternal(reinterpret_cast<intptr_t>(code
) + from
.m_offset
, to
);
755 static void repatchInt32(void* from
, int32_t to
)
757 patchPointerInternal(reinterpret_cast<intptr_t>(from
), reinterpret_cast<void*>(to
));
760 static void repatchCompact(void* where
, int32_t value
)
762 repatchInt32(where
, value
);
765 static void repatchPointer(void* from
, void* to
)
767 patchPointerInternal(reinterpret_cast<intptr_t>(from
), to
);
771 static intptr_t getAbsoluteJumpAddress(void* base
, int offset
= 0)
773 return reinterpret_cast<intptr_t>(base
) + offset
- sizeof(ARMWord
);
776 void linkJump(AssemblerLabel from
, AssemblerLabel to
)
778 ARMWord
* insn
= reinterpret_cast<ARMWord
*>(getAbsoluteJumpAddress(m_buffer
.data(), from
.m_offset
));
779 ARMWord
* addr
= getLdrImmAddressOnPool(insn
, m_buffer
.poolAddress());
780 *addr
= static_cast<ARMWord
>(to
.m_offset
);
783 static void linkJump(void* code
, AssemblerLabel from
, void* to
)
785 patchPointerInternal(getAbsoluteJumpAddress(code
, from
.m_offset
), to
);
788 static void relinkJump(void* from
, void* to
)
790 patchPointerInternal(getAbsoluteJumpAddress(from
), to
);
793 static void linkCall(void* code
, AssemblerLabel from
, void* to
)
795 patchPointerInternal(getAbsoluteJumpAddress(code
, from
.m_offset
), to
);
798 static void relinkCall(void* from
, void* to
)
800 patchPointerInternal(getAbsoluteJumpAddress(from
), to
);
803 static void* readCallTarget(void* from
)
805 return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from
))));
808 // Address operations
810 static void* getRelocatedAddress(void* code
, AssemblerLabel label
)
812 return reinterpret_cast<void*>(reinterpret_cast<char*>(code
) + label
.m_offset
);
815 // Address differences
817 static int getDifferenceBetweenLabels(AssemblerLabel a
, AssemblerLabel b
)
819 return b
.m_offset
- a
.m_offset
;
822 static unsigned getCallReturnOffset(AssemblerLabel call
)
824 return call
.m_offset
;
829 static ARMWord
getOp2Byte(ARMWord imm
)
832 return OP2_IMMh
| (imm
& 0x0f) | ((imm
& 0xf0) << 4) ;
// Encode 'imm' as an ARM operand-2 immediate; implemented out of line.
// NOTE(review): failure convention not visible here — presumably returns
// INVALID_IMM when no encoding exists; confirm in the .cpp.
835 static ARMWord
getOp2(ARMWord imm
);
837 #if WTF_ARM_ARCH_AT_LEAST(7)
838 static ARMWord
getImm16Op2(ARMWord imm
)
841 return (imm
& 0xf000) << 4 | (imm
& 0xfff);
// Produce an operand-2 encoding of 'imm', emitting code to build it in
// tmpReg when no direct encoding exists; 'invert' allows the complemented
// form. Implemented out of line.
845 ARMWord
getImm(ARMWord imm
, int tmpReg
, bool invert
= false);
// Emit code loading an arbitrary 32-bit immediate into 'dest'.
846 void moveImm(ARMWord imm
, int dest
);
// Handle an immediate with no simple operand-2 form, using 'dest' as scratch.
847 ARMWord
encodeComplexImm(ARMWord imm
, int dest
);
// Produce the offset operand for a halfword transfer: encoded inline when
// small enough, otherwise materialized via a temporary register.
// NOTE(review): the guard choosing between the two returns (presumably
// imm <= 0xff, original ~852) is not visible in this view.
849 ARMWord
getOffsetForHalfwordDataTransfer(ARMWord imm
, int tmpReg
)
851 // Encode immediate data in the instruction if it is possible
853 return getOp2Byte(imm
);
854 // Otherwise, store the data in a temporary register
855 return encodeComplexImm(imm
, tmpReg
);
858 // Memory load/store helpers
// Load/store a 32-bit value (or a byte when 'bytes' is true) at base+offset;
// implemented out of line.
860 void dataTransfer32(bool isLoad
, RegisterID srcDst
, RegisterID base
, int32_t offset
, bool bytes
= false);
// Load/store at base + (index << scale) + offset; implemented out of line.
861 void baseIndexTransfer32(bool isLoad
, RegisterID srcDst
, RegisterID base
, RegisterID index
, int scale
, int32_t offset
, bool bytes
= false);
// Load/store a double-precision value at base+offset; implemented out of line.
862 void doubleTransfer(bool isLoad
, FPRegisterID srcDst
, RegisterID base
, int32_t offset
);
864 // Constant pool hnadlers
866 static ARMWord
placeConstantPoolBarrier(int offset
)
868 offset
= (offset
- sizeof(ARMWord
)) >> 2;
869 ASSERT((offset
<= BOFFSET_MAX
&& offset
>= BOFFSET_MIN
));
870 return AL
| B
| (offset
& BRANCH_MASK
);
// Flush the instruction cache for the freshly written code range, per
// platform: RVCT uses an out-of-line __asm implementation; Linux/GCC walks
// the range page by page; other branches use OS services.
// NOTE(review): the inline-asm body of the Linux/GCC loop (original lines
// ~881-891), the surrounding braces, and several #elif/#else lines are not
// visible in this view — the fragments below are the sampled remainder.
873 #if OS(LINUX) && COMPILER(RVCT)
874 static __asm
void cacheFlush(void* code
, size_t);
876 static void cacheFlush(void* code
, size_t size
)
878 #if OS(LINUX) && COMPILER(GCC)
879 uintptr_t currentPage
= reinterpret_cast<uintptr_t>(code
) & ~(pageSize() - 1);
880 uintptr_t lastPage
= (reinterpret_cast<uintptr_t>(code
) + size
) & ~(pageSize() - 1);
892 : "r" (currentPage
), "r" (currentPage
+ pageSize())
894 currentPage
+= pageSize();
895 } while (lastPage
>= currentPage
);
897 CacheRangeFlush(code
, size
, CACHE_SYNC_ALL
);
898 #elif OS(QNX) && ENABLE(ASSEMBLER_WX_EXCLUSIVE)
902 msync(code
, size
, MS_INVALIDATE_ICACHE
);
904 #error "The cacheFlush support is missing on this platform."
// NOTE(review): only these ASSERT lines are visible; the enclosing helper
// signatures (original lines ~910-931, presumably the static RM/RS/RD/RN
// register-field encoders used throughout this file) are not shown. Each
// assert bounds the register index at ARMRegisters::pc, the highest ARM
// core register. Confirm against the original file before editing.
912 ASSERT(reg
<= ARMRegisters::pc
);
918 ASSERT(reg
<= ARMRegisters::pc
);
924 ASSERT(reg
<= ARMRegisters::pc
);
930 ASSERT(reg
<= ARMRegisters::pc
);
934 static ARMWord
getConditionalField(ARMWord i
)
936 return i
& 0xf0000000;
// Emit instructions materializing 'imm' (or its complement when !positive)
// into 'reg'; implemented out of line.
// NOTE(review): return-value meaning not visible here — see the .cpp.
939 int genInt(int reg
, ARMWord imm
, bool positive
);
947 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
949 #endif // ARMAssembler_h