2 * Copyright (C) 2008 Apple Inc.
3 * Copyright (C) 2009, 2010 University of Szeged
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef MacroAssemblerARM_h
29 #define MacroAssemblerARM_h
31 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
33 #include "ARMAssembler.h"
34 #include "AbstractMacroAssembler.h"
class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
    // Extra bit outside the ARM condition-code field: marks DoubleConditions
    // that need an additional "unordered" (NaN) fix-up compare before the jump.
    static const int DoubleConditionMask = 0x0f;
    static const int DoubleConditionBitSpecial = 0x10;
    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
public:
    typedef ARMRegisters::FPRegisterID FPRegisterID;

    static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;

    // Integer comparison conditions, defined directly in terms of ARM
    // condition codes (so ARMCondition() below is a plain cast).
    enum RelationalCondition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE
    };

    // Conditions observed on the flags set by a flag-setting ALU op.
    enum ResultCondition {
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    // Pointers are 4 bytes on this (32-bit) target.
    static const Scale ScalePtr = TimesFour;
// dest += src. Uses the flag-setting form (ADDS) so a following
// ResultCondition branch can observe overflow/sign/zero.
void add32(RegisterID src, RegisterID dest)
{
    m_assembler.adds_r(dest, dest, src);
}

// Read-modify-write add of an immediate to a memory word; the loaded
// value goes through scratch register S1.
void add32(TrustedImm32 imm, Address address)
{
    load32(address, ARMRegisters::S1);
    add32(imm, ARMRegisters::S1);
    store32(ARMRegisters::S1, address);
}

// dest += imm. getImm encodes the immediate as an operand2 if possible,
// otherwise materializes it in scratch S0.
void add32(TrustedImm32 imm, RegisterID dest)
{
    m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}

// dest += word at src (loaded via scratch S1).
void add32(Address src, RegisterID dest)
{
    load32(src, ARMRegisters::S1);
    add32(ARMRegisters::S1, dest);
}

// dest = src + imm.
void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
    m_assembler.adds_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}
116 void and32(RegisterID src
, RegisterID dest
)
118 m_assembler
.ands_r(dest
, dest
, src
);
121 void and32(TrustedImm32 imm
, RegisterID dest
)
123 ARMWord w
= m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
, true);
124 if (w
& ARMAssembler::OP2_INV_IMM
)
125 m_assembler
.bics_r(dest
, dest
, w
& ~ARMAssembler::OP2_INV_IMM
);
127 m_assembler
.ands_r(dest
, dest
, w
);
130 void and32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
132 ARMWord w
= m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
, true);
133 if (w
& ARMAssembler::OP2_INV_IMM
)
134 m_assembler
.bics_r(dest
, src
, w
& ~ARMAssembler::OP2_INV_IMM
);
136 m_assembler
.ands_r(dest
, src
, w
);
// dest <<= (shift_amount & 0x1f). The amount is masked to 0..31 first
// (into scratch S0) to match the shift semantics JSC expects.
void lshift32(RegisterID shift_amount, RegisterID dest)
{
    ARMWord w = ARMAssembler::getOp2(0x1f);
    ASSERT(w != ARMAssembler::INVALID_IMM);
    m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

    m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
}

// dest <<= (imm & 0x1f).
void lshift32(TrustedImm32 imm, RegisterID dest)
{
    m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
}

// dest = src << (imm & 0x1f).
void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
    m_assembler.movs_r(dest, m_assembler.lsl(src, imm.m_value & 0x1f));
}
158 void mul32(RegisterID src
, RegisterID dest
)
161 move(src
, ARMRegisters::S0
);
162 src
= ARMRegisters::S0
;
164 m_assembler
.muls_r(dest
, dest
, src
);
167 void mul32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
169 move(imm
, ARMRegisters::S0
);
170 m_assembler
.muls_r(dest
, src
, ARMRegisters::S0
);
// srcDest = 0 - srcDest, via flag-setting reverse subtract.
void neg32(RegisterID srcDest)
{
    m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
}
// dest |= src (flag-setting).
void or32(RegisterID src, RegisterID dest)
{
    m_assembler.orrs_r(dest, dest, src);
}

// dest |= imm.
void or32(TrustedImm32 imm, RegisterID dest)
{
    m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}

// dest = src | imm.
void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
    m_assembler.orrs_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}

// dest = op1 | op2.
void or32(RegisterID op1, RegisterID op2, RegisterID dest)
{
    m_assembler.orrs_r(dest, op1, op2);
}
// Arithmetic (sign-preserving) right shift by a register amount,
// masked to 0..31 via scratch S0.
void rshift32(RegisterID shift_amount, RegisterID dest)
{
    ARMWord w = ARMAssembler::getOp2(0x1f);
    ASSERT(w != ARMAssembler::INVALID_IMM);
    m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

    m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
}

// dest >>= (imm & 0x1f), arithmetic; delegates to the 3-operand form.
void rshift32(TrustedImm32 imm, RegisterID dest)
{
    rshift32(dest, imm, dest);
}

// dest = src >> (imm & 0x1f), arithmetic.
void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
    m_assembler.movs_r(dest, m_assembler.asr(src, imm.m_value & 0x1f));
}
// Logical (zero-filling) right shift by a register amount, masked to
// 0..31 via scratch S0.
void urshift32(RegisterID shift_amount, RegisterID dest)
{
    ARMWord w = ARMAssembler::getOp2(0x1f);
    ASSERT(w != ARMAssembler::INVALID_IMM);
    m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

    m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
}

// dest >>>= (imm & 0x1f), logical.
void urshift32(TrustedImm32 imm, RegisterID dest)
{
    m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
}

// dest = src >>> (imm & 0x1f), logical.
void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
    m_assembler.movs_r(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
}
// dest -= src (flag-setting).
void sub32(RegisterID src, RegisterID dest)
{
    m_assembler.subs_r(dest, dest, src);
}

// dest -= imm.
void sub32(TrustedImm32 imm, RegisterID dest)
{
    m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}

// Read-modify-write subtract of an immediate from a memory word
// (value travels through scratch S1).
void sub32(TrustedImm32 imm, Address address)
{
    load32(address, ARMRegisters::S1);
    sub32(imm, ARMRegisters::S1);
    store32(ARMRegisters::S1, address);
}

// dest -= word at src (loaded via scratch S1).
void sub32(Address src, RegisterID dest)
{
    load32(src, ARMRegisters::S1);
    sub32(ARMRegisters::S1, dest);
}

// dest = src - imm.
void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
    m_assembler.subs_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}
264 void xor32(RegisterID src
, RegisterID dest
)
266 m_assembler
.eors_r(dest
, dest
, src
);
269 void xor32(TrustedImm32 imm
, RegisterID dest
)
271 if (imm
.m_value
== -1)
272 m_assembler
.mvns_r(dest
, dest
);
274 m_assembler
.eors_r(dest
, dest
, m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
));
277 void xor32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
279 if (imm
.m_value
== -1)
280 m_assembler
.mvns_r(dest
, src
);
282 m_assembler
.eors_r(dest
, src
, m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
));
285 void countLeadingZeros32(RegisterID src
, RegisterID dest
)
287 #if WTF_ARM_ARCH_AT_LEAST(5)
288 m_assembler
.clz_r(dest
, src
);
292 ASSERT_NOT_REACHED();
// Zero-extending byte load from base+offset (trailing 'true' selects a
// byte-sized transfer).
void load8(ImplicitAddress address, RegisterID dest)
{
    m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
}

// Zero-extending byte load from base + (index << scale) + offset.
void load8(BaseIndex address, RegisterID dest)
{
    m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset, true);
}

// 32-bit load from base+offset.
void load32(ImplicitAddress address, RegisterID dest)
{
    m_assembler.dataTransfer32(true, dest, address.base, address.offset);
}

// 32-bit load from base + (index << scale) + offset.
void load32(BaseIndex address, RegisterID dest)
{
    m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
316 #if CPU(ARMV5_OR_LOWER)
317 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
);
319 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
321 load32(address
, dest
);
325 void load16Unaligned(BaseIndex address
, RegisterID dest
)
327 load16(address
, dest
);
330 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
332 DataLabel32
dataLabel(this);
333 m_assembler
.ldr_un_imm(ARMRegisters::S0
, 0);
334 m_assembler
.dtr_ur(true, dest
, address
.base
, ARMRegisters::S0
);
338 DataLabelCompact
load32WithCompactAddressOffsetPatch(Address address
, RegisterID dest
)
340 DataLabelCompact
dataLabel(this);
341 load32WithAddressOffsetPatch(address
, dest
);
345 void load16(BaseIndex address
, RegisterID dest
)
347 m_assembler
.add_r(ARMRegisters::S1
, address
.base
, m_assembler
.lsl(address
.index
, address
.scale
));
348 load16(Address(ARMRegisters::S1
, address
.offset
), dest
);
351 void load16(ImplicitAddress address
, RegisterID dest
)
353 if (address
.offset
>= 0)
354 m_assembler
.ldrh_u(dest
, address
.base
, m_assembler
.getOffsetForHalfwordDataTransfer(address
.offset
, ARMRegisters::S0
));
356 m_assembler
.ldrh_d(dest
, address
.base
, m_assembler
.getOffsetForHalfwordDataTransfer(-address
.offset
, ARMRegisters::S0
));
359 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
361 DataLabel32
dataLabel(this);
362 m_assembler
.ldr_un_imm(ARMRegisters::S0
, 0);
363 m_assembler
.dtr_ur(false, src
, address
.base
, ARMRegisters::S0
);
367 void store32(RegisterID src
, ImplicitAddress address
)
369 m_assembler
.dataTransfer32(false, src
, address
.base
, address
.offset
);
372 void store32(RegisterID src
, BaseIndex address
)
374 m_assembler
.baseIndexTransfer32(false, src
, address
.base
, address
.index
, static_cast<int>(address
.scale
), address
.offset
);
377 void store32(TrustedImm32 imm
, ImplicitAddress address
)
380 m_assembler
.ldr_un_imm(ARMRegisters::S1
, imm
.m_value
);
382 move(imm
, ARMRegisters::S1
);
383 store32(ARMRegisters::S1
, address
);
386 void store32(RegisterID src
, void* address
)
388 m_assembler
.ldr_un_imm(ARMRegisters::S0
, reinterpret_cast<ARMWord
>(address
));
389 m_assembler
.dtr_u(false, src
, ARMRegisters::S0
, 0);
392 void store32(TrustedImm32 imm
, void* address
)
394 m_assembler
.ldr_un_imm(ARMRegisters::S0
, reinterpret_cast<ARMWord
>(address
));
396 m_assembler
.ldr_un_imm(ARMRegisters::S1
, imm
.m_value
);
398 m_assembler
.moveImm(imm
.m_value
, ARMRegisters::S1
);
399 m_assembler
.dtr_u(false, ARMRegisters::S1
, ARMRegisters::S0
, 0);
// Pop the top machine-stack word into dest.
void pop(RegisterID dest)
{
    m_assembler.pop_r(dest);
}

// Push src onto the machine stack.
void push(RegisterID src)
{
    m_assembler.push_r(src);
}

// Push a memory word (via scratch S1).
void push(Address address)
{
    load32(address, ARMRegisters::S1);
    push(ARMRegisters::S1);
}

// Push an immediate (via scratch S0).
void push(TrustedImm32 imm)
{
    move(imm, ARMRegisters::S0);
    push(ARMRegisters::S0);
}
424 void move(TrustedImm32 imm
, RegisterID dest
)
427 m_assembler
.ldr_un_imm(dest
, imm
.m_value
);
429 m_assembler
.moveImm(imm
.m_value
, dest
);
432 void move(RegisterID src
, RegisterID dest
)
434 m_assembler
.mov_r(dest
, src
);
437 void move(TrustedImmPtr imm
, RegisterID dest
)
439 move(TrustedImm32(imm
), dest
);
442 void swap(RegisterID reg1
, RegisterID reg2
)
444 m_assembler
.mov_r(ARMRegisters::S0
, reg1
);
445 m_assembler
.mov_r(reg1
, reg2
);
446 m_assembler
.mov_r(reg2
, ARMRegisters::S0
);
449 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
455 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
// Compare a byte in memory with an immediate and branch.
Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
{
    load8(left, ARMRegisters::S1);
    return branch32(cond, ARMRegisters::S1, right);
}

// Same for a base+index address; the immediate must fit in one byte.
Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
{
    ASSERT(!(right.m_value & 0xFFFFFF00));
    load8(left, ARMRegisters::S1);
    return branch32(cond, ARMRegisters::S1, right);
}

// Compare two registers and branch. useConstantPool is forwarded to the
// assembler's jmp so the branch can be kept patchable.
Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
{
    m_assembler.cmp_r(left, right);
    return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
}
480 Jump
branch32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
, int useConstantPool
= 0)
482 if (right
.m_isPointer
) {
483 m_assembler
.ldr_un_imm(ARMRegisters::S0
, right
.m_value
);
484 m_assembler
.cmp_r(left
, ARMRegisters::S0
);
486 ARMWord tmp
= (right
.m_value
== 0x80000000) ? ARMAssembler::INVALID_IMM
: m_assembler
.getOp2(-right
.m_value
);
487 if (tmp
!= ARMAssembler::INVALID_IMM
)
488 m_assembler
.cmn_r(left
, tmp
);
490 m_assembler
.cmp_r(left
, m_assembler
.getImm(right
.m_value
, ARMRegisters::S0
));
492 return Jump(m_assembler
.jmp(ARMCondition(cond
), useConstantPool
));
// Memory-operand comparison branches: the memory word is loaded into
// scratch S1, then the register/immediate overloads do the work.
Jump branch32(RelationalCondition cond, RegisterID left, Address right)
{
    load32(right, ARMRegisters::S1);
    return branch32(cond, left, ARMRegisters::S1);
}

Jump branch32(RelationalCondition cond, Address left, RegisterID right)
{
    load32(left, ARMRegisters::S1);
    return branch32(cond, ARMRegisters::S1, right);
}

Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
{
    load32(left, ARMRegisters::S1);
    return branch32(cond, ARMRegisters::S1, right);
}

Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
{
    load32(left, ARMRegisters::S1);
    return branch32(cond, ARMRegisters::S1, right);
}

// Variant tolerating an address that is only halfword-aligned.
Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
{
    load32WithUnalignedHalfWords(left, ARMRegisters::S1);
    return branch32(cond, ARMRegisters::S1, right);
}
525 Jump
branchTest8(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
527 load8(address
, ARMRegisters::S1
);
528 return branchTest32(cond
, ARMRegisters::S1
, mask
);
531 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, RegisterID mask
)
533 ASSERT((cond
== Zero
) || (cond
== NonZero
));
534 m_assembler
.tst_r(reg
, mask
);
535 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
538 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
540 ASSERT((cond
== Zero
) || (cond
== NonZero
));
541 ARMWord w
= m_assembler
.getImm(mask
.m_value
, ARMRegisters::S0
, true);
542 if (w
& ARMAssembler::OP2_INV_IMM
)
543 m_assembler
.bics_r(ARMRegisters::S0
, reg
, w
& ~ARMAssembler::OP2_INV_IMM
);
545 m_assembler
.tst_r(reg
, w
);
546 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
549 Jump
branchTest32(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
551 load32(address
, ARMRegisters::S1
);
552 return branchTest32(cond
, ARMRegisters::S1
, mask
);
555 Jump
branchTest32(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
557 load32(address
, ARMRegisters::S1
);
558 return branchTest32(cond
, ARMRegisters::S1
, mask
);
563 return Jump(m_assembler
.jmp());
566 void jump(RegisterID target
)
568 m_assembler
.bx(target
);
571 void jump(Address address
)
573 load32(address
, ARMRegisters::pc
);
576 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
578 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
580 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
583 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
585 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
587 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
590 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
592 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
593 add32(src
, imm
, dest
);
594 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
597 void mull32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
600 move(src1
, ARMRegisters::S0
);
601 src1
= ARMRegisters::S0
;
603 m_assembler
.mull_r(ARMRegisters::S1
, dest
, src2
, src1
);
604 m_assembler
.cmp_r(ARMRegisters::S1
, m_assembler
.asr(dest
, 31));
607 Jump
branchMul32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
609 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
610 if (cond
== Overflow
) {
611 mull32(src
, dest
, dest
);
616 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
619 Jump
branchMul32(ResultCondition cond
, TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
621 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
622 if (cond
== Overflow
) {
623 move(imm
, ARMRegisters::S0
);
624 mull32(ARMRegisters::S0
, src
, dest
);
628 mul32(imm
, src
, dest
);
629 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
632 Jump
branchSub32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
634 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
636 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
639 Jump
branchSub32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
641 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
643 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
646 Jump
branchSub32(ResultCondition cond
, RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
648 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
649 sub32(src
, imm
, dest
);
650 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
653 Jump
branchSub32(ResultCondition cond
, RegisterID op1
, RegisterID op2
, RegisterID dest
)
655 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
656 m_assembler
.subs_r(dest
, op1
, op2
);
657 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
660 Jump
branchNeg32(ResultCondition cond
, RegisterID srcDest
)
662 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
664 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
667 Jump
branchOr32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
669 ASSERT((cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
671 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
681 #if WTF_ARM_ARCH_AT_LEAST(5)
682 ensureSpace(2 * sizeof(ARMWord
), sizeof(ARMWord
));
683 m_assembler
.loadBranchTarget(ARMRegisters::S1
, ARMAssembler::AL
, true);
684 return Call(m_assembler
.blx(ARMRegisters::S1
), Call::LinkableNear
);
687 return Call(m_assembler
.jmp(ARMAssembler::AL
, true), Call::LinkableNear
);
691 Call
call(RegisterID target
)
693 return Call(m_assembler
.blx(target
), Call::None
);
696 void call(Address address
)
698 call32(address
.base
, address
.offset
);
703 m_assembler
.bx(linkRegister
);
// dest = (left <cond> right) ? 1 : 0: compare, clear dest, then
// conditionally set it to 1.
void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
{
    m_assembler.cmp_r(left, right);
    m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
    m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
}

// dest = (left <cond> imm) ? 1 : 0.
void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
    m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
    m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
    m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
}

// dest = (byte at left <cond> imm) ? 1 : 0 (byte loaded via S1).
void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
{
    load8(left, ARMRegisters::S1);
    compare32(cond, ARMRegisters::S1, right, dest);
}
726 void test32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
, RegisterID dest
)
728 if (mask
.m_value
== -1)
729 m_assembler
.cmp_r(0, reg
);
731 m_assembler
.tst_r(reg
, m_assembler
.getImm(mask
.m_value
, ARMRegisters::S0
));
732 m_assembler
.mov_r(dest
, ARMAssembler::getOp2(0));
733 m_assembler
.mov_r(dest
, ARMAssembler::getOp2(1), ARMCondition(cond
));
736 void test32(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
738 load32(address
, ARMRegisters::S1
);
739 test32(cond
, ARMRegisters::S1
, mask
, dest
);
742 void test8(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
744 load8(address
, ARMRegisters::S1
);
745 test32(cond
, ARMRegisters::S1
, mask
, dest
);
// dest = src + imm, non-flag-setting form (preserves the current flags).
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
    m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}

// Read-modify-write add on an absolute address: the address is loaded
// into S1 (from the constant pool), the word loaded through it,
// adjusted, and stored back via a freshly-loaded address in S0.
void add32(TrustedImm32 imm, AbsoluteAddress address)
{
    m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
    m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
    add32(imm, ARMRegisters::S1);
    m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
    m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
}

// Read-modify-write subtract on an absolute address (same pattern).
void sub32(TrustedImm32 imm, AbsoluteAddress address)
{
    m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
    m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
    sub32(imm, ARMRegisters::S1);
    m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
    m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
}
// 32-bit load from an absolute address (address materialized in S0).
void load32(const void* address, RegisterID dest)
{
    m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
    m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
}

// Compare an absolute-addressed word against a register and branch.
Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
{
    load32(left.m_ptr, ARMRegisters::S1);
    return branch32(cond, ARMRegisters::S1, right);
}

// Compare an absolute-addressed word against an immediate and branch.
Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
{
    load32(left.m_ptr, ARMRegisters::S1);
    return branch32(cond, ARMRegisters::S1, right);
}
// Computed pc-relative jump: pc += index << scale. On ARM, reading pc
// yields the address two instructions ahead, so the following NOP
// fills the slot the adjusted pc skips over.
void relativeTableJump(RegisterID index, int scale)
{
    ASSERT(scale >= 0 && scale <= 31);
    m_assembler.add_r(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));

    // NOP the default prefetching
    m_assembler.mov_r(ARMRegisters::r0, ARMRegisters::r0);
}
800 #if WTF_ARM_ARCH_AT_LEAST(5)
801 ensureSpace(2 * sizeof(ARMWord
), sizeof(ARMWord
));
802 m_assembler
.loadBranchTarget(ARMRegisters::S1
, ARMAssembler::AL
, true);
803 return Call(m_assembler
.blx(ARMRegisters::S1
), Call::Linkable
);
806 return Call(m_assembler
.jmp(ARMAssembler::AL
, true), Call::Linkable
);
// A tail call is an unconditional jump recorded as a call.
Call tailRecursiveCall()
{
    return Call::fromTailJump(jump());
}

// Convert a previously-emitted jump into a tail call.
Call makeTailRecursiveCall(Jump oldJump)
{
    return Call::fromTailJump(oldJump);
}
820 DataLabelPtr
moveWithPatch(TrustedImmPtr initialValue
, RegisterID dest
)
822 DataLabelPtr
dataLabel(this);
823 m_assembler
.ldr_un_imm(dest
, reinterpret_cast<ARMWord
>(initialValue
.m_value
));
827 Jump
branchPtrWithPatch(RelationalCondition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
829 dataLabel
= moveWithPatch(initialRightValue
, ARMRegisters::S1
);
830 Jump jump
= branch32(cond
, left
, ARMRegisters::S1
, true);
834 Jump
branchPtrWithPatch(RelationalCondition cond
, Address left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
836 load32(left
, ARMRegisters::S1
);
837 dataLabel
= moveWithPatch(initialRightValue
, ARMRegisters::S0
);
838 Jump jump
= branch32(cond
, ARMRegisters::S0
, ARMRegisters::S1
, true);
842 DataLabelPtr
storePtrWithPatch(TrustedImmPtr initialValue
, ImplicitAddress address
)
844 DataLabelPtr dataLabel
= moveWithPatch(initialValue
, ARMRegisters::S1
);
845 store32(ARMRegisters::S1
, address
);
849 DataLabelPtr
storePtrWithPatch(ImplicitAddress address
)
851 return storePtrWithPatch(TrustedImmPtr(0), address
);
854 // Floating point operators
855 static bool supportsFloatingPoint()
857 return s_isVFPPresent
;
860 static bool supportsFloatingPointTruncate()
865 static bool supportsFloatingPointSqrt()
867 return s_isVFPPresent
;
869 static bool supportsFloatingPointAbs() { return false; }
// VFP double load from base+offset.
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
    m_assembler.doubleTransfer(true, dest, address.base, address.offset);
}

// VFP double load from an absolute address (address in scratch S0).
void loadDouble(const void* address, FPRegisterID dest)
{
    m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
    m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
}

// VFP double store to base+offset.
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
    m_assembler.doubleTransfer(false, src, address.base, address.offset);
}

// dest += src.
void addDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vadd_f64_r(dest, dest, src);
}

// dest += double at src (via scratch double SD0).
void addDouble(Address src, FPRegisterID dest)
{
    loadDouble(src, ARMRegisters::SD0);
    addDouble(ARMRegisters::SD0, dest);
}

// dest /= src.
void divDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vdiv_f64_r(dest, dest, src);
}

// dest /= double at src. Guarded: this path has never been exercised.
void divDouble(Address src, FPRegisterID dest)
{
    ASSERT_NOT_REACHED(); // Untested
    loadDouble(src, ARMRegisters::SD0);
    divDouble(ARMRegisters::SD0, dest);
}

// dest -= src.
void subDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vsub_f64_r(dest, dest, src);
}

// dest -= double at src.
void subDouble(Address src, FPRegisterID dest)
{
    loadDouble(src, ARMRegisters::SD0);
    subDouble(ARMRegisters::SD0, dest);
}

// dest *= src.
void mulDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vmul_f64_r(dest, dest, src);
}

// dest *= double at src.
void mulDouble(Address src, FPRegisterID dest)
{
    loadDouble(src, ARMRegisters::SD0);
    mulDouble(ARMRegisters::SD0, dest);
}

// dest = sqrt(src).
void sqrtDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vsqrt_f64_r(dest, src);
}

// Not supported on this port (supportsFloatingPointAbs() is false).
void absDouble(FPRegisterID, FPRegisterID)
{
    ASSERT_NOT_REACHED();
}
// dest = (double)src. The integer goes into the low single-precision
// half of dest (dest << 1 is the corresponding S-register index), then
// is converted to double in place.
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
    m_assembler.vmov_vfp_r(dest << 1, src);
    m_assembler.vcvt_f64_s32_r(dest, dest << 1);
}

// dest = (double)(word at src). Guarded: this path has never been
// exercised on this port.
void convertInt32ToDouble(Address src, FPRegisterID dest)
{
    ASSERT_NOT_REACHED(); // Untested
    // flds does not worth the effort here
    load32(src, ARMRegisters::S1);
    convertInt32ToDouble(ARMRegisters::S1, dest);
}

// dest = (double)(*src.m_ptr). Guarded: never exercised on this port.
void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
{
    ASSERT_NOT_REACHED(); // Untested
    // flds does not worth the effort here
    m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
    m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
    convertInt32ToDouble(ARMRegisters::S1, dest);
}
// Compare two doubles and branch. VCMP sets the FP status flags; VMRS
// copies them to the APSR. For conditions carrying
// DoubleConditionBitSpecial, an extra flag-setting compare is emitted
// under VS (the "unordered" condition) so the NaN case branches the
// intended way; the special bit is then masked off before selecting
// the ARM condition for the jump.
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
    m_assembler.vcmp_f64_r(left, right);
    m_assembler.vmrs_apsr();
    if (cond & DoubleConditionBitSpecial)
        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
    return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
}
974 // Truncates 'src' to an integer, and places the resulting 'dest'.
975 // If the result is not representable as a 32 bit value, branch.
976 // May also branch for some values that are representable in 32 bits
977 // (specifically, in this case, INT_MIN).
978 Jump
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
)
982 ASSERT_NOT_REACHED();
// Convert 'src' to an integer, and places the resulting 'dest'.
// If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits
// (specifically, in this case, 0).
// NOTE(review): fpTemp is accepted for cross-port interface parity but
// is not used by this implementation (SD0 serves as the scratch) —
// confirm against the other MacroAssembler ports.
void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
{
    m_assembler.vcvt_s32_f64_r(ARMRegisters::SD0 << 1, src);
    m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);

    // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
    m_assembler.vcvt_f64_s32_r(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
    failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

    // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
    failureCases.append(branchTest32(Zero, dest));
}
// Branch if reg != 0.0 (ordered: NaN does not take the branch).
// scratch receives the constant 0.0, synthesized by converting the
// integer 0 held in S0.
Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
{
    m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
    convertInt32ToDouble(ARMRegisters::S0, scratch);
    return branchDouble(DoubleNotEqual, reg, scratch);
}

// Branch if reg == 0.0 or reg is NaN.
Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
{
    m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
    convertInt32ToDouble(ARMRegisters::S0, scratch);
    return branchDouble(DoubleEqualOrUnordered, reg, scratch);
}
// Decode the destination of a previously-emitted (linked) call.
static FunctionPtr readCallTarget(CodeLocationCall call)
{
    return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
}
// The portable condition enums are defined directly in terms of ARM
// condition codes (see the enum definitions), so mapping is a cast.
ARMAssembler::Condition ARMCondition(RelationalCondition cond)
{
    return static_cast<ARMAssembler::Condition>(cond);
}

ARMAssembler::Condition ARMCondition(ResultCondition cond)
{
    return static_cast<ARMAssembler::Condition>(cond);
}

// Guarantee room in the instruction buffer / constant pool so a
// multi-instruction sequence is not split by a pool flush.
void ensureSpace(int insnSpace, int constSpace)
{
    m_assembler.ensureSpace(insnSpace, constSpace);
}

int sizeOfConstantPool()
{
    return m_assembler.sizeOfConstantPool();
}
1050 #if WTF_ARM_ARCH_VERSION < 5
1051 ensureSpace(2 * sizeof(ARMWord
), sizeof(ARMWord
));
1053 m_assembler
.mov_r(linkRegister
, ARMRegisters::pc
);
1057 void call32(RegisterID base
, int32_t offset
)
1059 #if WTF_ARM_ARCH_AT_LEAST(5)
1060 int targetReg
= ARMRegisters::S1
;
1062 int targetReg
= ARMRegisters::pc
;
1064 int tmpReg
= ARMRegisters::S1
;
1066 if (base
== ARMRegisters::sp
)
1070 if (offset
<= 0xfff) {
1072 m_assembler
.dtr_u(true, targetReg
, base
, offset
);
1073 } else if (offset
<= 0xfffff) {
1074 m_assembler
.add_r(tmpReg
, base
, ARMAssembler::OP2_IMM
| (offset
>> 12) | (10 << 8));
1076 m_assembler
.dtr_u(true, targetReg
, tmpReg
, offset
& 0xfff);
1078 m_assembler
.moveImm(offset
, tmpReg
);
1080 m_assembler
.dtr_ur(true, targetReg
, base
, tmpReg
);
1084 if (offset
<= 0xfff) {
1086 m_assembler
.dtr_d(true, targetReg
, base
, offset
);
1087 } else if (offset
<= 0xfffff) {
1088 m_assembler
.sub_r(tmpReg
, base
, ARMAssembler::OP2_IMM
| (offset
>> 12) | (10 << 8));
1090 m_assembler
.dtr_d(true, targetReg
, tmpReg
, offset
& 0xfff);
1092 m_assembler
.moveImm(offset
, tmpReg
);
1094 m_assembler
.dtr_dr(true, targetReg
, base
, tmpReg
);
1097 #if WTF_ARM_ARCH_AT_LEAST(5)
1098 m_assembler
.blx(targetReg
);
friend class LinkBuffer;
friend class RepatchBuffer;

// Bind a not-yet-linked call inside the generated 'code' to 'function'.
static void linkCall(void* code, Call call, FunctionPtr function)
{
    ARMAssembler::linkCall(code, call.m_label, function.value());
}

// Redirect an already-linked call to a new destination.
static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
{
    ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
}

static void repatchCall(CodeLocationCall call, FunctionPtr destination)
{
    ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
}

// Whether the CPU has VFP; read by the supportsFloatingPoint*()
// predicates in this file.
static const bool s_isVFPPresent;
1126 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
1128 #endif // MacroAssemblerARM_h