/*
 * Copyright (C) 2008 Apple Inc.
 * Copyright (C) 2009, 2010 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM_h
#define MacroAssemblerARM_h

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
    static const int DoubleConditionMask = 0x0f;
    static const int DoubleConditionBitSpecial = 0x10;
    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
public:
    typedef ARMRegisters::FPRegisterID FPRegisterID;

    enum Condition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE,
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };

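    // Layout of the values above: each DoubleCondition is a plain
    // ARMAssembler::Condition, with DoubleConditionBitSpecial OR'd in for the
    // conditions whose NaN (unordered) behaviour needs patching up with an
    // extra compare; the COMPILE_ASSERT at the top of the class keeps the flag
    // bit distinct from the mask. See branchDouble() below for how the bit is
    // consumed.
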
    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    static const Scale ScalePtr = TimesFour;

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, src);
    }

    void add32(Imm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ands_r(dest, dest, src);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.ands_r(dest, dest, w);
    }

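    // Note on the path above: getImm() with the invert flag may return the
    // operand encoded as the bitwise complement of the requested value, tagged
    // with OP2_INV_IMM, because ARM's rotated 8-bit immediates can often encode
    // a mask or its complement but not both. An AND with such a mask then
    // becomes BIC with the complemented encoding; e.g. and32(Imm32(0xfffffff0),
    // dest) can be emitted as "bics dest, dest, #0xf".
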
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }

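    // Both variants mask the shift amount with 0x1f, giving "amount mod 32"
    // semantics: a native ARM register shift consumes the low byte of the
    // register, so an unmasked amount of 32 or more would clear dest entirely,
    // whereas callers of the macro assembler expect, e.g., a shift by 33 to
    // behave as a shift by 1.
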
    void mul32(RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            move(src, ARMRegisters::S0);
            src = ARMRegisters::S0;
        }
        m_assembler.muls_r(dest, dest, src);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls_r(dest, src, ARMRegisters::S0);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
    }

    void not32(RegisterID dest)
    {
        m_assembler.mvns_r(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, src);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, src);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void sub32(Imm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, src);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

#if CPU(ARMV5_OR_LOWER)
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

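    // ARMv5 and earlier do not support unaligned word loads (an unaligned LDR
    // rotates the loaded word instead of fetching the bytes in order), so for
    // addresses that may only be halfword-aligned the out-of-line definition
    // above has to build the value from halfword accesses; later architectures
    // can rely on plain load32 here.
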
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        load32(address, dest);
        return label;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.add_r(ARMRegisters::S0, address.base, m_assembler.lsl(address.index, address.scale));
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
        else
            m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, address.base, ARMAssembler::getOp2Byte(address.offset));
        else
            m_assembler.ldrh_d(dest, address.base, ARMAssembler::getOp2Byte(-address.offset));
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(false, src, address.base, address.offset);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void store32(RegisterID src, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
    }

    void store32(Imm32 imm, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    void push(Imm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(dest, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, dest);
    }

    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov_r(dest, src);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        m_assembler.mov_r(ARMRegisters::S0, reg1);
        m_assembler.mov_r(reg1, reg2);
        m_assembler.mov_r(reg2, ARMRegisters::S0);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    Jump branch8(Condition cond, Address left, Imm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp_r(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right, int useConstantPool = 0)
    {
        if (right.m_isPointer) {
            m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
            m_assembler.cmp_r(left, ARMRegisters::S0);
        } else
            m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        UNUSED_PARAM(cond);
        UNUSED_PARAM(left);
        UNUSED_PARAM(right);
        ASSERT_NOT_REACHED();
        return jump();
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        load16(left, ARMRegisters::S0);
        move(right, ARMRegisters::S1);
        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
        return m_assembler.jmp(ARMCondition(cond));
    }

    Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst_r(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.tst_r(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }

    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest) {
            move(src1, ARMRegisters::S0);
            src1 = ARMRegisters::S0;
        }
        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }

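    // The instruction pair above is a signed-overflow check for the 32x32
    // multiply: the long multiply leaves the high word of the 64-bit product in
    // S1, and the product fits in 32 bits exactly when that high word equals the
    // sign extension of the low word (dest shifted right arithmetically by 31).
    // The cmp therefore leaves NE set precisely on overflow, which branchMul32()
    // below turns into a NonZero branch when asked for Overflow.
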
    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src, dest, dest);
            cond = NonZero;
        } else
            mul32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        } else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchNeg32(Condition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Call nearCall()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
#endif
    }

    Call call(RegisterID target)
    {
        m_assembler.blx(target);
        JmpSrc jmpSrc;
        return Call(jmpSrc, Call::None);
    }

    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    void ret()
    {
        m_assembler.bx(linkRegister);
    }

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp_r(left, right);
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32(cond, left, right, dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        load32(left, ARMRegisters::S1);
        set32(cond, ARMRegisters::S1, right, dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32(cond, left, right, dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        if (mask.m_value == -1)
            m_assembler.cmp_r(ARMRegisters::S1, ARMAssembler::getOp2(0)); // compare the loaded word against zero
        else
            m_assembler.tst_r(ARMRegisters::S1, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        // ARM doesn't have byte registers
        setTest32(cond, address, mask, dest);
    }

    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        sub32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void load32(void* address, RegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Call call()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
#endif
    }

    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }

    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

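    // ldr_un_imm emits a literal-pool load whose pool slot is not shared with
    // other uses of the same constant (presumably "un" for unique), so every
    // moveWithPatch() site keeps its own patchable 32-bit word and repatching
    // one site cannot disturb another.
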
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(ImmPtr(0), address);
    }

    // Floating point operators
    bool supportsFloatingPoint() const
    {
        return s_isVFPPresent;
    }

    bool supportsFloatingPointTruncate() const
    {
        return false;
    }

    bool supportsFloatingPointSqrt() const
    {
        return s_isVFPPresent;
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.doubleTransfer(false, src, address.base, address.offset);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.faddd_r(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fdivd_r(dest, dest, src);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsubd_r(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmuld_r(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrtd_r(dest, src);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmsr_r(dest, src);
        m_assembler.fsitod_r(dest, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds is not worth the effort here
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds is not worth the effort here
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(src.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmpd_r(left, right);
        m_assembler.fmstat();
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }

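    // The VS-conditional compare above is what implements DoubleConditionBitSpecial:
    // fmstat copies the VFP flags into the CPSR, where an unordered comparison sets
    // V. Comparing a register with itself yields Z=1, N=0, C=1, V=0 - the "equal"
    // flag state - so conditions carrying the special bit see NaN operands as
    // "equal": DoubleEqualOrUnordered then fires, and DoubleNotEqual does not.
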
    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
        return jump();
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.ftosid_r(ARMRegisters::SD0, src);
        m_assembler.fmrs_r(dest, ARMRegisters::SD0);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals -0.0
        failureCases.append(branchTest32(Zero, dest));
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

protected:
    ARMAssembler::Condition ARMCondition(Condition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

    // On architectures without blx the call sequence loads the target directly
    // into pc, so the return address has to be placed in lr by hand just before
    // the load (reading pc yields the address two instructions ahead, which is
    // exactly the instruction following that load).
    void prepareCall()
    {
#if WTF_ARM_ARCH_VERSION < 5
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));

        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
#endif
    }

    void call32(RegisterID base, int32_t offset)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        int targetReg = ARMRegisters::S1;
#else
        int targetReg = ARMRegisters::pc;
#endif
        int tmpReg = ARMRegisters::S1;

        if (base == ARMRegisters::sp)
            offset += 4;

        if (offset >= 0) {
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_u(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                ARMWord reg = m_assembler.getImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_ur(true, targetReg, base, reg);
            }
        } else {
            offset = -offset;
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_d(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                ARMWord reg = m_assembler.getImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_dr(true, targetReg, base, reg);
            }
        }
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.blx(targetReg);
#endif
    }

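    // A note on the immediate folding in call32() above: for offsets up to
    // 0xfffff the high part is folded into the base register with a single ALU
    // operation. OP2_IMM | (offset >> 12) | (10 << 8) encodes the 8-bit value
    // (offset >> 12) with rotate field 10, i.e. rotate right by 20 bits, which
    // for a byte value equals a left shift by 12; the add/sub therefore applies
    // offset & 0xff000, leaving the low 12 bits to fit the load instruction's
    // immediate field.
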
private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static const bool s_isVFPPresent;
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#endif // MacroAssemblerARM_h