2 * Copyright (C) 2008 Apple Inc.
3 * Copyright (C) 2009 University of Szeged
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef MacroAssemblerARM_h
29 #define MacroAssemblerARM_h
31 #include <wtf/Platform.h>
33 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
35 #include "ARMAssembler.h"
36 #include "AbstractMacroAssembler.h"
40 class MacroAssemblerARM
: public AbstractMacroAssembler
<ARMAssembler
> {
41 static const int DoubleConditionMask
= 0x0f;
42 static const int DoubleConditionBitSpecial
= 0x10;
43 COMPILE_ASSERT(!(DoubleConditionBitSpecial
& DoubleConditionMask
), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes
);
46 Equal
= ARMAssembler::EQ
,
47 NotEqual
= ARMAssembler::NE
,
48 Above
= ARMAssembler::HI
,
49 AboveOrEqual
= ARMAssembler::CS
,
50 Below
= ARMAssembler::CC
,
51 BelowOrEqual
= ARMAssembler::LS
,
52 GreaterThan
= ARMAssembler::GT
,
53 GreaterThanOrEqual
= ARMAssembler::GE
,
54 LessThan
= ARMAssembler::LT
,
55 LessThanOrEqual
= ARMAssembler::LE
,
56 Overflow
= ARMAssembler::VS
,
57 Signed
= ARMAssembler::MI
,
58 Zero
= ARMAssembler::EQ
,
59 NonZero
= ARMAssembler::NE
62 enum DoubleCondition
{
63 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
64 DoubleEqual
= ARMAssembler::EQ
,
65 DoubleNotEqual
= ARMAssembler::NE
| DoubleConditionBitSpecial
,
66 DoubleGreaterThan
= ARMAssembler::GT
,
67 DoubleGreaterThanOrEqual
= ARMAssembler::GE
,
68 DoubleLessThan
= ARMAssembler::CC
,
69 DoubleLessThanOrEqual
= ARMAssembler::LS
,
70 // If either operand is NaN, these conditions always evaluate to true.
71 DoubleEqualOrUnordered
= ARMAssembler::EQ
| DoubleConditionBitSpecial
,
72 DoubleNotEqualOrUnordered
= ARMAssembler::NE
,
73 DoubleGreaterThanOrUnordered
= ARMAssembler::HI
,
74 DoubleGreaterThanOrEqualOrUnordered
= ARMAssembler::CS
,
75 DoubleLessThanOrUnordered
= ARMAssembler::LT
,
76 DoubleLessThanOrEqualOrUnordered
= ARMAssembler::LE
,
79 static const RegisterID stackPointerRegister
= ARMRegisters::sp
;
80 static const RegisterID linkRegister
= ARMRegisters::lr
;
82 static const Scale ScalePtr
= TimesFour
;
84 void add32(RegisterID src
, RegisterID dest
)
86 m_assembler
.adds_r(dest
, dest
, src
);
89 void add32(Imm32 imm
, Address address
)
91 load32(address
, ARMRegisters::S1
);
92 add32(imm
, ARMRegisters::S1
);
93 store32(ARMRegisters::S1
, address
);
96 void add32(Imm32 imm
, RegisterID dest
)
98 m_assembler
.adds_r(dest
, dest
, m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
));
101 void add32(Address src
, RegisterID dest
)
103 load32(src
, ARMRegisters::S1
);
104 add32(ARMRegisters::S1
, dest
);
107 void and32(RegisterID src
, RegisterID dest
)
109 m_assembler
.ands_r(dest
, dest
, src
);
112 void and32(Imm32 imm
, RegisterID dest
)
114 ARMWord w
= m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
, true);
115 if (w
& ARMAssembler::OP2_INV_IMM
)
116 m_assembler
.bics_r(dest
, dest
, w
& ~ARMAssembler::OP2_INV_IMM
);
118 m_assembler
.ands_r(dest
, dest
, w
);
121 void lshift32(RegisterID shift_amount
, RegisterID dest
)
123 ARMWord w
= ARMAssembler::getOp2(0x1f);
124 ASSERT(w
!= ARMAssembler::INVALID_IMM
);
125 m_assembler
.and_r(ARMRegisters::S0
, shift_amount
, w
);
127 m_assembler
.movs_r(dest
, m_assembler
.lsl_r(dest
, ARMRegisters::S0
));
130 void lshift32(Imm32 imm
, RegisterID dest
)
132 m_assembler
.movs_r(dest
, m_assembler
.lsl(dest
, imm
.m_value
& 0x1f));
135 void mul32(RegisterID src
, RegisterID dest
)
138 move(src
, ARMRegisters::S0
);
139 src
= ARMRegisters::S0
;
141 m_assembler
.muls_r(dest
, dest
, src
);
144 void mul32(Imm32 imm
, RegisterID src
, RegisterID dest
)
146 move(imm
, ARMRegisters::S0
);
147 m_assembler
.muls_r(dest
, src
, ARMRegisters::S0
);
150 void neg32(RegisterID srcDest
)
152 m_assembler
.rsbs_r(srcDest
, srcDest
, ARMAssembler::getOp2(0));
155 void not32(RegisterID dest
)
157 m_assembler
.mvns_r(dest
, dest
);
160 void or32(RegisterID src
, RegisterID dest
)
162 m_assembler
.orrs_r(dest
, dest
, src
);
165 void or32(Imm32 imm
, RegisterID dest
)
167 m_assembler
.orrs_r(dest
, dest
, m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
));
170 void rshift32(RegisterID shift_amount
, RegisterID dest
)
172 ARMWord w
= ARMAssembler::getOp2(0x1f);
173 ASSERT(w
!= ARMAssembler::INVALID_IMM
);
174 m_assembler
.and_r(ARMRegisters::S0
, shift_amount
, w
);
176 m_assembler
.movs_r(dest
, m_assembler
.asr_r(dest
, ARMRegisters::S0
));
179 void rshift32(Imm32 imm
, RegisterID dest
)
181 m_assembler
.movs_r(dest
, m_assembler
.asr(dest
, imm
.m_value
& 0x1f));
184 void sub32(RegisterID src
, RegisterID dest
)
186 m_assembler
.subs_r(dest
, dest
, src
);
189 void sub32(Imm32 imm
, RegisterID dest
)
191 m_assembler
.subs_r(dest
, dest
, m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
));
194 void sub32(Imm32 imm
, Address address
)
196 load32(address
, ARMRegisters::S1
);
197 sub32(imm
, ARMRegisters::S1
);
198 store32(ARMRegisters::S1
, address
);
201 void sub32(Address src
, RegisterID dest
)
203 load32(src
, ARMRegisters::S1
);
204 sub32(ARMRegisters::S1
, dest
);
207 void xor32(RegisterID src
, RegisterID dest
)
209 m_assembler
.eors_r(dest
, dest
, src
);
212 void xor32(Imm32 imm
, RegisterID dest
)
214 m_assembler
.eors_r(dest
, dest
, m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
));
217 void load32(ImplicitAddress address
, RegisterID dest
)
219 m_assembler
.dataTransfer32(true, dest
, address
.base
, address
.offset
);
222 void load32(BaseIndex address
, RegisterID dest
)
224 m_assembler
.baseIndexTransfer32(true, dest
, address
.base
, address
.index
, static_cast<int>(address
.scale
), address
.offset
);
227 #if CPU(ARMV5_OR_LOWER)
228 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
);
230 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
232 load32(address
, dest
);
236 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
238 DataLabel32
dataLabel(this);
239 m_assembler
.ldr_un_imm(ARMRegisters::S0
, 0);
240 m_assembler
.dtr_ur(true, dest
, address
.base
, ARMRegisters::S0
);
244 Label
loadPtrWithPatchToLEA(Address address
, RegisterID dest
)
247 load32(address
, dest
);
251 void load16(BaseIndex address
, RegisterID dest
)
253 m_assembler
.add_r(ARMRegisters::S0
, address
.base
, m_assembler
.lsl(address
.index
, address
.scale
));
254 if (address
.offset
>=0)
255 m_assembler
.ldrh_u(dest
, ARMRegisters::S0
, ARMAssembler::getOp2Byte(address
.offset
));
257 m_assembler
.ldrh_d(dest
, ARMRegisters::S0
, ARMAssembler::getOp2Byte(-address
.offset
));
260 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
262 DataLabel32
dataLabel(this);
263 m_assembler
.ldr_un_imm(ARMRegisters::S0
, 0);
264 m_assembler
.dtr_ur(false, src
, address
.base
, ARMRegisters::S0
);
268 void store32(RegisterID src
, ImplicitAddress address
)
270 m_assembler
.dataTransfer32(false, src
, address
.base
, address
.offset
);
273 void store32(RegisterID src
, BaseIndex address
)
275 m_assembler
.baseIndexTransfer32(false, src
, address
.base
, address
.index
, static_cast<int>(address
.scale
), address
.offset
);
278 void store32(Imm32 imm
, ImplicitAddress address
)
281 m_assembler
.ldr_un_imm(ARMRegisters::S1
, imm
.m_value
);
283 move(imm
, ARMRegisters::S1
);
284 store32(ARMRegisters::S1
, address
);
287 void store32(RegisterID src
, void* address
)
289 m_assembler
.ldr_un_imm(ARMRegisters::S0
, reinterpret_cast<ARMWord
>(address
));
290 m_assembler
.dtr_u(false, src
, ARMRegisters::S0
, 0);
293 void store32(Imm32 imm
, void* address
)
295 m_assembler
.ldr_un_imm(ARMRegisters::S0
, reinterpret_cast<ARMWord
>(address
));
297 m_assembler
.ldr_un_imm(ARMRegisters::S1
, imm
.m_value
);
299 m_assembler
.moveImm(imm
.m_value
, ARMRegisters::S1
);
300 m_assembler
.dtr_u(false, ARMRegisters::S1
, ARMRegisters::S0
, 0);
303 void pop(RegisterID dest
)
305 m_assembler
.pop_r(dest
);
308 void push(RegisterID src
)
310 m_assembler
.push_r(src
);
313 void push(Address address
)
315 load32(address
, ARMRegisters::S1
);
316 push(ARMRegisters::S1
);
321 move(imm
, ARMRegisters::S0
);
322 push(ARMRegisters::S0
);
325 void move(Imm32 imm
, RegisterID dest
)
328 m_assembler
.ldr_un_imm(dest
, imm
.m_value
);
330 m_assembler
.moveImm(imm
.m_value
, dest
);
333 void move(RegisterID src
, RegisterID dest
)
335 m_assembler
.mov_r(dest
, src
);
338 void move(ImmPtr imm
, RegisterID dest
)
340 move(Imm32(imm
), dest
);
343 void swap(RegisterID reg1
, RegisterID reg2
)
345 m_assembler
.mov_r(ARMRegisters::S0
, reg1
);
346 m_assembler
.mov_r(reg1
, reg2
);
347 m_assembler
.mov_r(reg2
, ARMRegisters::S0
);
350 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
356 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
362 Jump
branch32(Condition cond
, RegisterID left
, RegisterID right
, int useConstantPool
= 0)
364 m_assembler
.cmp_r(left
, right
);
365 return Jump(m_assembler
.jmp(ARMCondition(cond
), useConstantPool
));
368 Jump
branch32(Condition cond
, RegisterID left
, Imm32 right
, int useConstantPool
= 0)
370 if (right
.m_isPointer
) {
371 m_assembler
.ldr_un_imm(ARMRegisters::S0
, right
.m_value
);
372 m_assembler
.cmp_r(left
, ARMRegisters::S0
);
374 m_assembler
.cmp_r(left
, m_assembler
.getImm(right
.m_value
, ARMRegisters::S0
));
375 return Jump(m_assembler
.jmp(ARMCondition(cond
), useConstantPool
));
378 Jump
branch32(Condition cond
, RegisterID left
, Address right
)
380 load32(right
, ARMRegisters::S1
);
381 return branch32(cond
, left
, ARMRegisters::S1
);
384 Jump
branch32(Condition cond
, Address left
, RegisterID right
)
386 load32(left
, ARMRegisters::S1
);
387 return branch32(cond
, ARMRegisters::S1
, right
);
390 Jump
branch32(Condition cond
, Address left
, Imm32 right
)
392 load32(left
, ARMRegisters::S1
);
393 return branch32(cond
, ARMRegisters::S1
, right
);
396 Jump
branch32(Condition cond
, BaseIndex left
, Imm32 right
)
398 load32(left
, ARMRegisters::S1
);
399 return branch32(cond
, ARMRegisters::S1
, right
);
402 Jump
branch32WithUnalignedHalfWords(Condition cond
, BaseIndex left
, Imm32 right
)
404 load32WithUnalignedHalfWords(left
, ARMRegisters::S1
);
405 return branch32(cond
, ARMRegisters::S1
, right
);
408 Jump
branch16(Condition cond
, BaseIndex left
, RegisterID right
)
413 ASSERT_NOT_REACHED();
417 Jump
branch16(Condition cond
, BaseIndex left
, Imm32 right
)
419 load16(left
, ARMRegisters::S0
);
420 move(right
, ARMRegisters::S1
);
421 m_assembler
.cmp_r(ARMRegisters::S0
, ARMRegisters::S1
);
422 return m_assembler
.jmp(ARMCondition(cond
));
425 Jump
branchTest32(Condition cond
, RegisterID reg
, RegisterID mask
)
427 ASSERT((cond
== Zero
) || (cond
== NonZero
));
428 m_assembler
.tst_r(reg
, mask
);
429 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
432 Jump
branchTest32(Condition cond
, RegisterID reg
, Imm32 mask
= Imm32(-1))
434 ASSERT((cond
== Zero
) || (cond
== NonZero
));
435 ARMWord w
= m_assembler
.getImm(mask
.m_value
, ARMRegisters::S0
, true);
436 if (w
& ARMAssembler::OP2_INV_IMM
)
437 m_assembler
.bics_r(ARMRegisters::S0
, reg
, w
& ~ARMAssembler::OP2_INV_IMM
);
439 m_assembler
.tst_r(reg
, w
);
440 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
443 Jump
branchTest32(Condition cond
, Address address
, Imm32 mask
= Imm32(-1))
445 load32(address
, ARMRegisters::S1
);
446 return branchTest32(cond
, ARMRegisters::S1
, mask
);
449 Jump
branchTest32(Condition cond
, BaseIndex address
, Imm32 mask
= Imm32(-1))
451 load32(address
, ARMRegisters::S1
);
452 return branchTest32(cond
, ARMRegisters::S1
, mask
);
457 return Jump(m_assembler
.jmp());
460 void jump(RegisterID target
)
462 move(target
, ARMRegisters::pc
);
465 void jump(Address address
)
467 load32(address
, ARMRegisters::pc
);
470 Jump
branchAdd32(Condition cond
, RegisterID src
, RegisterID dest
)
472 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
474 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
477 Jump
branchAdd32(Condition cond
, Imm32 imm
, RegisterID dest
)
479 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
481 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
484 void mull32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
487 move(src1
, ARMRegisters::S0
);
488 src1
= ARMRegisters::S0
;
490 m_assembler
.mull_r(ARMRegisters::S1
, dest
, src2
, src1
);
491 m_assembler
.cmp_r(ARMRegisters::S1
, m_assembler
.asr(dest
, 31));
494 Jump
branchMul32(Condition cond
, RegisterID src
, RegisterID dest
)
496 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
497 if (cond
== Overflow
) {
498 mull32(src
, dest
, dest
);
503 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
506 Jump
branchMul32(Condition cond
, Imm32 imm
, RegisterID src
, RegisterID dest
)
508 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
509 if (cond
== Overflow
) {
510 move(imm
, ARMRegisters::S0
);
511 mull32(ARMRegisters::S0
, src
, dest
);
515 mul32(imm
, src
, dest
);
516 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
519 Jump
branchSub32(Condition cond
, RegisterID src
, RegisterID dest
)
521 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
523 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
526 Jump
branchSub32(Condition cond
, Imm32 imm
, RegisterID dest
)
528 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
530 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
533 Jump
branchOr32(Condition cond
, RegisterID src
, RegisterID dest
)
535 ASSERT((cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
537 return Jump(m_assembler
.jmp(ARMCondition(cond
)));
548 return Call(m_assembler
.jmp(ARMAssembler::AL
, true), Call::LinkableNear
);
551 Call
call(RegisterID target
)
554 move(ARMRegisters::pc
, target
);
556 return Call(jmpSrc
, Call::None
);
559 void call(Address address
)
561 call32(address
.base
, address
.offset
);
566 m_assembler
.mov_r(ARMRegisters::pc
, linkRegister
);
569 void set32(Condition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
571 m_assembler
.cmp_r(left
, right
);
572 m_assembler
.mov_r(dest
, ARMAssembler::getOp2(0));
573 m_assembler
.mov_r(dest
, ARMAssembler::getOp2(1), ARMCondition(cond
));
576 void set32(Condition cond
, RegisterID left
, Imm32 right
, RegisterID dest
)
578 m_assembler
.cmp_r(left
, m_assembler
.getImm(right
.m_value
, ARMRegisters::S0
));
579 m_assembler
.mov_r(dest
, ARMAssembler::getOp2(0));
580 m_assembler
.mov_r(dest
, ARMAssembler::getOp2(1), ARMCondition(cond
));
583 void set8(Condition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
585 // ARM doesn't have byte registers
586 set32(cond
, left
, right
, dest
);
589 void set8(Condition cond
, Address left
, RegisterID right
, RegisterID dest
)
591 // ARM doesn't have byte registers
592 load32(left
, ARMRegisters::S1
);
593 set32(cond
, ARMRegisters::S1
, right
, dest
);
596 void set8(Condition cond
, RegisterID left
, Imm32 right
, RegisterID dest
)
598 // ARM doesn't have byte registers
599 set32(cond
, left
, right
, dest
);
602 void setTest32(Condition cond
, Address address
, Imm32 mask
, RegisterID dest
)
604 load32(address
, ARMRegisters::S1
);
605 if (mask
.m_value
== -1)
606 m_assembler
.cmp_r(0, ARMRegisters::S1
);
608 m_assembler
.tst_r(ARMRegisters::S1
, m_assembler
.getImm(mask
.m_value
, ARMRegisters::S0
));
609 m_assembler
.mov_r(dest
, ARMAssembler::getOp2(0));
610 m_assembler
.mov_r(dest
, ARMAssembler::getOp2(1), ARMCondition(cond
));
613 void setTest8(Condition cond
, Address address
, Imm32 mask
, RegisterID dest
)
615 // ARM doesn't have byte registers
616 setTest32(cond
, address
, mask
, dest
);
619 void add32(Imm32 imm
, RegisterID src
, RegisterID dest
)
621 m_assembler
.add_r(dest
, src
, m_assembler
.getImm(imm
.m_value
, ARMRegisters::S0
));
624 void add32(Imm32 imm
, AbsoluteAddress address
)
626 m_assembler
.ldr_un_imm(ARMRegisters::S1
, reinterpret_cast<ARMWord
>(address
.m_ptr
));
627 m_assembler
.dtr_u(true, ARMRegisters::S1
, ARMRegisters::S1
, 0);
628 add32(imm
, ARMRegisters::S1
);
629 m_assembler
.ldr_un_imm(ARMRegisters::S0
, reinterpret_cast<ARMWord
>(address
.m_ptr
));
630 m_assembler
.dtr_u(false, ARMRegisters::S1
, ARMRegisters::S0
, 0);
633 void sub32(Imm32 imm
, AbsoluteAddress address
)
635 m_assembler
.ldr_un_imm(ARMRegisters::S1
, reinterpret_cast<ARMWord
>(address
.m_ptr
));
636 m_assembler
.dtr_u(true, ARMRegisters::S1
, ARMRegisters::S1
, 0);
637 sub32(imm
, ARMRegisters::S1
);
638 m_assembler
.ldr_un_imm(ARMRegisters::S0
, reinterpret_cast<ARMWord
>(address
.m_ptr
));
639 m_assembler
.dtr_u(false, ARMRegisters::S1
, ARMRegisters::S0
, 0);
642 void load32(void* address
, RegisterID dest
)
644 m_assembler
.ldr_un_imm(ARMRegisters::S0
, reinterpret_cast<ARMWord
>(address
));
645 m_assembler
.dtr_u(true, dest
, ARMRegisters::S0
, 0);
648 Jump
branch32(Condition cond
, AbsoluteAddress left
, RegisterID right
)
650 load32(left
.m_ptr
, ARMRegisters::S1
);
651 return branch32(cond
, ARMRegisters::S1
, right
);
654 Jump
branch32(Condition cond
, AbsoluteAddress left
, Imm32 right
)
656 load32(left
.m_ptr
, ARMRegisters::S1
);
657 return branch32(cond
, ARMRegisters::S1
, right
);
663 return Call(m_assembler
.jmp(ARMAssembler::AL
, true), Call::Linkable
);
666 Call
tailRecursiveCall()
668 return Call::fromTailJump(jump());
671 Call
makeTailRecursiveCall(Jump oldJump
)
673 return Call::fromTailJump(oldJump
);
676 DataLabelPtr
moveWithPatch(ImmPtr initialValue
, RegisterID dest
)
678 DataLabelPtr
dataLabel(this);
679 m_assembler
.ldr_un_imm(dest
, reinterpret_cast<ARMWord
>(initialValue
.m_value
));
683 Jump
branchPtrWithPatch(Condition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, ImmPtr initialRightValue
= ImmPtr(0))
685 dataLabel
= moveWithPatch(initialRightValue
, ARMRegisters::S1
);
686 Jump jump
= branch32(cond
, left
, ARMRegisters::S1
, true);
690 Jump
branchPtrWithPatch(Condition cond
, Address left
, DataLabelPtr
& dataLabel
, ImmPtr initialRightValue
= ImmPtr(0))
692 load32(left
, ARMRegisters::S1
);
693 dataLabel
= moveWithPatch(initialRightValue
, ARMRegisters::S0
);
694 Jump jump
= branch32(cond
, ARMRegisters::S0
, ARMRegisters::S1
, true);
698 DataLabelPtr
storePtrWithPatch(ImmPtr initialValue
, ImplicitAddress address
)
700 DataLabelPtr dataLabel
= moveWithPatch(initialValue
, ARMRegisters::S1
);
701 store32(ARMRegisters::S1
, address
);
705 DataLabelPtr
storePtrWithPatch(ImplicitAddress address
)
707 return storePtrWithPatch(ImmPtr(0), address
);
710 // Floating point operators
711 bool supportsFloatingPoint() const
713 return s_isVFPPresent
;
716 bool supportsFloatingPointTruncate() const
721 void loadDouble(ImplicitAddress address
, FPRegisterID dest
)
723 m_assembler
.doubleTransfer(true, dest
, address
.base
, address
.offset
);
726 void loadDouble(void* address
, FPRegisterID dest
)
728 m_assembler
.ldr_un_imm(ARMRegisters::S0
, (ARMWord
)address
);
729 m_assembler
.fdtr_u(true, dest
, ARMRegisters::S0
, 0);
732 void storeDouble(FPRegisterID src
, ImplicitAddress address
)
734 m_assembler
.doubleTransfer(false, src
, address
.base
, address
.offset
);
737 void addDouble(FPRegisterID src
, FPRegisterID dest
)
739 m_assembler
.faddd_r(dest
, dest
, src
);
742 void addDouble(Address src
, FPRegisterID dest
)
744 loadDouble(src
, ARMRegisters::SD0
);
745 addDouble(ARMRegisters::SD0
, dest
);
748 void divDouble(FPRegisterID src
, FPRegisterID dest
)
750 m_assembler
.fdivd_r(dest
, dest
, src
);
753 void divDouble(Address src
, FPRegisterID dest
)
755 ASSERT_NOT_REACHED(); // Untested
756 loadDouble(src
, ARMRegisters::SD0
);
757 divDouble(ARMRegisters::SD0
, dest
);
760 void subDouble(FPRegisterID src
, FPRegisterID dest
)
762 m_assembler
.fsubd_r(dest
, dest
, src
);
765 void subDouble(Address src
, FPRegisterID dest
)
767 loadDouble(src
, ARMRegisters::SD0
);
768 subDouble(ARMRegisters::SD0
, dest
);
771 void mulDouble(FPRegisterID src
, FPRegisterID dest
)
773 m_assembler
.fmuld_r(dest
, dest
, src
);
776 void mulDouble(Address src
, FPRegisterID dest
)
778 loadDouble(src
, ARMRegisters::SD0
);
779 mulDouble(ARMRegisters::SD0
, dest
);
782 void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
)
784 m_assembler
.fmsr_r(dest
, src
);
785 m_assembler
.fsitod_r(dest
, dest
);
788 void convertInt32ToDouble(Address src
, FPRegisterID dest
)
790 ASSERT_NOT_REACHED(); // Untested
791 // flds does not worth the effort here
792 load32(src
, ARMRegisters::S1
);
793 convertInt32ToDouble(ARMRegisters::S1
, dest
);
796 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
798 ASSERT_NOT_REACHED(); // Untested
799 // flds does not worth the effort here
800 m_assembler
.ldr_un_imm(ARMRegisters::S1
, (ARMWord
)src
.m_ptr
);
801 m_assembler
.dtr_u(true, ARMRegisters::S1
, ARMRegisters::S1
, 0);
802 convertInt32ToDouble(ARMRegisters::S1
, dest
);
805 Jump
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
)
807 m_assembler
.fcmpd_r(left
, right
);
808 m_assembler
.fmstat();
809 if (cond
& DoubleConditionBitSpecial
)
810 m_assembler
.cmp_r(ARMRegisters::S0
, ARMRegisters::S0
, ARMAssembler::VS
);
811 return Jump(m_assembler
.jmp(static_cast<ARMAssembler::Condition
>(cond
& ~DoubleConditionMask
)));
814 // Truncates 'src' to an integer, and places the resulting 'dest'.
815 // If the result is not representable as a 32 bit value, branch.
816 // May also branch for some values that are representable in 32 bits
817 // (specifically, in this case, INT_MIN).
818 Jump
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
)
822 ASSERT_NOT_REACHED();
826 // Convert 'src' to an integer, and places the resulting 'dest'.
827 // If the result is not representable as a 32 bit value, branch.
828 // May also branch for some values that are representable in 32 bits
829 // (specifically, in this case, 0).
830 void branchConvertDoubleToInt32(FPRegisterID src
, RegisterID dest
, JumpList
& failureCases
, FPRegisterID fpTemp
)
832 m_assembler
.ftosid_r(ARMRegisters::SD0
, src
);
833 m_assembler
.fmrs_r(dest
, ARMRegisters::SD0
);
835 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
836 m_assembler
.fsitod_r(ARMRegisters::SD0
, ARMRegisters::SD0
);
837 failureCases
.append(branchDouble(DoubleNotEqualOrUnordered
, src
, ARMRegisters::SD0
));
839 // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
840 failureCases
.append(branchTest32(Zero
, dest
));
843 void zeroDouble(FPRegisterID srcDest
)
845 m_assembler
.mov_r(ARMRegisters::S0
, ARMAssembler::getOp2(0));
846 convertInt32ToDouble(ARMRegisters::S0
, srcDest
);
850 ARMAssembler::Condition
ARMCondition(Condition cond
)
852 return static_cast<ARMAssembler::Condition
>(cond
);
855 void ensureSpace(int insnSpace
, int constSpace
)
857 m_assembler
.ensureSpace(insnSpace
, constSpace
);
860 int sizeOfConstantPool()
862 return m_assembler
.sizeOfConstantPool();
867 ensureSpace(2 * sizeof(ARMWord
), sizeof(ARMWord
));
869 m_assembler
.mov_r(linkRegister
, ARMRegisters::pc
);
872 void call32(RegisterID base
, int32_t offset
)
874 if (base
== ARMRegisters::sp
)
878 if (offset
<= 0xfff) {
880 m_assembler
.dtr_u(true, ARMRegisters::pc
, base
, offset
);
881 } else if (offset
<= 0xfffff) {
882 m_assembler
.add_r(ARMRegisters::S0
, base
, ARMAssembler::OP2_IMM
| (offset
>> 12) | (10 << 8));
884 m_assembler
.dtr_u(true, ARMRegisters::pc
, ARMRegisters::S0
, offset
& 0xfff);
886 ARMWord reg
= m_assembler
.getImm(offset
, ARMRegisters::S0
);
888 m_assembler
.dtr_ur(true, ARMRegisters::pc
, base
, reg
);
892 if (offset
<= 0xfff) {
894 m_assembler
.dtr_d(true, ARMRegisters::pc
, base
, offset
);
895 } else if (offset
<= 0xfffff) {
896 m_assembler
.sub_r(ARMRegisters::S0
, base
, ARMAssembler::OP2_IMM
| (offset
>> 12) | (10 << 8));
898 m_assembler
.dtr_d(true, ARMRegisters::pc
, ARMRegisters::S0
, offset
& 0xfff);
900 ARMWord reg
= m_assembler
.getImm(offset
, ARMRegisters::S0
);
902 m_assembler
.dtr_dr(true, ARMRegisters::pc
, base
, reg
);
908 friend class LinkBuffer
;
909 friend class RepatchBuffer
;
911 static void linkCall(void* code
, Call call
, FunctionPtr function
)
913 ARMAssembler::linkCall(code
, call
.m_jmp
, function
.value());
916 static void repatchCall(CodeLocationCall call
, CodeLocationLabel destination
)
918 ARMAssembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
921 static void repatchCall(CodeLocationCall call
, FunctionPtr destination
)
923 ARMAssembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
926 static const bool s_isVFPPresent
;
931 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
933 #endif // MacroAssemblerARM_h