/*
 * Copyright (C) 2008 Apple Inc.
 * Copyright (C) 2009, 2010 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssemblerARM_h
#define MacroAssemblerARM_h

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {
class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
    static const int DoubleConditionMask = 0x0f;
    static const int DoubleConditionBitSpecial = 0x10;
    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
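    // DoubleConditionBitSpecial does not encode an ARM condition code; it
    // flags double comparisons that need an extra flag-fixing compare for the
    // unordered (NaN) case (see branchDouble() below).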
public:
    typedef ARMRegisters::FPRegisterID FPRegisterID;

    static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;
    enum RelationalCondition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE
    };

    enum ResultCondition {
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };
    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    static const Scale ScalePtr = TimesFour;
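    // Note: ARMRegisters::S0 and ARMRegisters::S1 are scratch registers
    // reserved for the assembler; the operations below use them freely to
    // materialize immediates and hold intermediate loads.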
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, src);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ands_r(dest, dest, src);
    }
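    // getImm() with the invert flag allowed may return the bitwise-inverted
    // immediate tagged with OP2_INV_IMM when the requested value has no valid
    // ARM operand-2 encoding but its complement does; AND then becomes BIC
    // (and a flag-setting BIC stands in for TST in branchTest32() below).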
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.ands_r(dest, dest, w);
    }
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }
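    // ARM's MUL does not allow Rd and Rm to be the same register (the result
    // is UNPREDICTABLE on pre-ARMv6 cores), which is presumably why mul32()
    // copies src into the scratch register when it aliases dest.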
    void mul32(RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            move(src, ARMRegisters::S0);
            src = ARMRegisters::S0;
        }
        m_assembler.muls_r(dest, dest, src);
    }
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls_r(dest, src, ARMRegisters::S0);
    }
    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
    }

    void not32(RegisterID dest)
    {
        m_assembler.mvns_r(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, src);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, src);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.clz_r(dest, src);
#else
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
#endif
    }
    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
#if CPU(ARMV5_OR_LOWER)
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif
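    // The patchable load/store helpers below emit a constant-pool load of a
    // placeholder offset into S0; the returned label lets the JIT patch in
    // the real offset later.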
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact dataLabel(this);
        load32WithAddressOffsetPatch(address, dest);
        return dataLabel;
    }
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.add_r(ARMRegisters::S1, address.base, m_assembler.lsl(address.index, address.scale));
        load16(Address(ARMRegisters::S1, address.offset), dest);
    }
    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(address.offset, ARMRegisters::S0));
        else
            m_assembler.ldrh_d(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(-address.offset, ARMRegisters::S0));
    }
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }
    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(false, src, address.base, address.offset);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }
    void store32(RegisterID src, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
    }
    void store32(TrustedImm32 imm, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    void push(TrustedImm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }
    void move(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(dest, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, dest);
    }
    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov_r(dest, src);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        m_assembler.mov_r(ARMRegisters::S0, reg1);
        m_assembler.mov_r(reg1, reg2);
        m_assembler.mov_r(reg2, ARMRegisters::S0);
    }
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp_r(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }
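    // When comparing against an immediate, the overload below first tries CMN
    // with the negated value: if -imm has a valid operand-2 encoding, this
    // avoids loading the immediate into a scratch register.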
    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
    {
        if (right.m_isPointer) {
            m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
            m_assembler.cmp_r(left, ARMRegisters::S0);
        } else {
            ARMWord tmp = m_assembler.getOp2(-right.m_value);
            if (tmp != ARMAssembler::INVALID_IMM)
                m_assembler.cmn_r(left, tmp);
            else
                m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        }
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }
    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
    Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        UNUSED_PARAM(cond);
        UNUSED_PARAM(left);
        UNUSED_PARAM(right);
        ASSERT_NOT_REACHED();
        return jump();
    }
    Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load16(left, ARMRegisters::S0);
        move(right, ARMRegisters::S1);
        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
        return m_assembler.jmp(ARMCondition(cond));
    }
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst_r(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.tst_r(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }
    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest) {
            move(src1, ARMRegisters::S0);
            src1 = ARMRegisters::S0;
        }
        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }
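    // mull32() leaves the high half of the 64-bit product in S1 and compares
    // it with the sign-extension of the low half (dest asr #31); the compare
    // yields NonZero exactly when the signed multiply overflowed 32 bits,
    // which is how branchMul32() below tests Overflow.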
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src, dest, dest);
            cond = NonZero;
        }
        else
            mul32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    Call nearCall()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
#else
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
#endif
    }
    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    void ret()
    {
        m_assembler.bx(linkRegister);
    }
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp_r(left, right);
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }
    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmp_r(0, reg);
        else
            m_assembler.tst_r(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
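    // The AbsoluteAddress forms below do a read-modify-write: load the
    // pointer into S1, fetch the value through it (clobbering S1), operate,
    // then rematerialize the pointer into S0 for the store back.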
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        sub32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }
    void load32(const void* address, RegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
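    // Reading the PC on ARM yields the address of the current instruction
    // plus 8 (two instructions ahead), so the add in relativeTableJump()
    // dispatches past the trailing NOP, which pads out that prefetch offset.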
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);
        m_assembler.add_r(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));

        // NOP the default prefetching
        m_assembler.mov_r(ARMRegisters::r0, ARMRegisters::r0);
    }
    Call call()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
#else
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
#endif
    }
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }
    // Floating point operators
    bool supportsFloatingPoint() const
    {
        return s_isVFPPresent;
    }

    bool supportsFloatingPointTruncate() const
    {
        return s_isVFPPresent;
    }

    bool supportsFloatingPointSqrt() const
    {
        return s_isVFPPresent;
    }
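    // s_isVFPPresent is initialized once at startup; its definition (in
    // MacroAssemblerARM.cpp) probes the platform for VFP support, e.g. via
    // the ELF hwcaps on Linux.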
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.doubleTransfer(false, src, address.base, address.offset);
    }
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_f64_r(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_f64_r(dest, dest, src);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_f64_r(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_f64_r(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt_f64_r(dest, src);
    }
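    // VFP double registers alias pairs of single-precision registers, so
    // (dest << 1) in the conversions below names the first single-precision
    // register overlapping the double register 'dest'.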
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov_vfp_r(dest << 1, src);
        m_assembler.vcvt_f64_s32_r(dest, dest << 1);
    }
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds is not worth the effort here
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds is not worth the effort here
        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }
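    // For conditions tagged with DoubleConditionBitSpecial, branchDouble()
    // emits a compare of S0 with itself predicated on VS: it executes only
    // when the VFP compare was unordered (a NaN operand) and forces Z, so the
    // following EQ/NE test gives the answer the Double*Unordered conditions
    // require for NaN.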
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_f64_r(left, right);
        m_assembler.vmrs_apsr();
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }
    // Truncates 'src' to an integer, and places the resulting value in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN and INT_MAX).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvtr_s32_f64_r(ARMRegisters::SD0 << 1, src);
        // If VCVTR.S32.F64 can't fit the result into a 32-bit
        // integer, it saturates at INT_MAX or INT_MIN. Testing this is
        // probably quicker than testing FPSCR for exception.
        m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);
        m_assembler.sub_r(ARMRegisters::S0, dest, ARMAssembler::getOp2(0x80000000));
        m_assembler.cmn_r(ARMRegisters::S0, ARMAssembler::getOp2(1), ARMCondition(NotEqual));
        return Jump(m_assembler.jmp(ARMCondition(Equal)));
    }
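    // The sub/cmn pair above folds both saturated results onto Equal:
    // subtracting 0x80000000 turns INT_MIN into zero (setting Z directly),
    // while the CMN with 1, executed only if that result was non-zero, sets Z
    // when dest was INT_MAX (0x7fffffff - 0x80000000 == -1).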
    // Converts 'src' to an integer, and places the resulting value in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.vcvt_s32_f64_r(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_f64_s32_r(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals -0.0.
        failureCases.append(branchTest32(Zero, dest));
    }
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
protected:
    ARMAssembler::Condition ARMCondition(RelationalCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    ARMAssembler::Condition ARMCondition(ResultCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }
    void prepareCall()
    {
#if WTF_ARM_ARCH_VERSION < 5
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));

        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
#endif
    }
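    // On ARMv4 there is no BLX, so call32() branches by loading straight into
    // the PC; prepareCall() supplies the missing link step. 'mov lr, pc'
    // reads the PC as the current instruction plus 8, i.e. the address of the
    // instruction following the PC-loading transfer emitted next.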
    void call32(RegisterID base, int32_t offset)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        int targetReg = ARMRegisters::S1;
#else
        int targetReg = ARMRegisters::pc;
#endif
        int tmpReg = ARMRegisters::S1;

        if (base == ARMRegisters::sp)
            offset += 4;

        if (offset >= 0) {
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_u(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_ur(true, targetReg, base, tmpReg);
            }
        } else {
            offset = -offset;
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_d(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_dr(true, targetReg, base, tmpReg);
            }
        }
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.blx(targetReg);
#endif
    }
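    // In the medium-offset paths above, OP2_IMM | (offset >> 12) | (10 << 8)
    // encodes bits 12..19 of the offset as an ARM operand-2 immediate: the
    // rotate field of 10 rotates the 8-bit value right by 20 bits, which is
    // the same as shifting it left by 12.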
private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
    static const bool s_isVFPPresent;
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#endif // MacroAssemblerARM_h