/*
 * Copyright (C) 2009, 2010, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2010 University of Szeged
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARMv7_h
#define MacroAssemblerARMv7_h

#if ENABLE(ASSEMBLER)

#include "ARMv7Assembler.h"
#include "AbstractMacroAssembler.h"

class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
    static const RegisterID dataTempRegister = ARMRegisters::ip;
    static const RegisterID addressTempRegister = ARMRegisters::r6;

    static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
    inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }

public:
    MacroAssemblerARMv7()
        : m_makeJumpPatchable(false)
    {
    }

    typedef ARMv7Assembler::LinkRecord LinkRecord;
    typedef ARMv7Assembler::JumpType JumpType;
    typedef ARMv7Assembler::JumpLinkType JumpLinkType;
    typedef ARMv7Assembler::Condition Condition;

    static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
    static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -255 && value <= 255;
    }

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARMv7Assembler::link(record, from, to); }

    struct ArmAddress {
        enum AddressType {
            HasOffset,
            HasIndex,
        } type;
        RegisterID base;
        union {
            int32_t offset;
            struct {
                RegisterID index;
                Scale scale;
            };
        } u;

        explicit ArmAddress(RegisterID base, int32_t offset = 0)
            : type(HasOffset)
            , base(base)
        {
            u.offset = offset;
        }

        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
            : type(HasIndex)
            , base(base)
        {
            u.index = index;
            u.scale = scale;
        }
    };

    static const Scale ScalePtr = TimesFour;

    enum RelationalCondition {
        Equal = ARMv7Assembler::ConditionEQ,
        NotEqual = ARMv7Assembler::ConditionNE,
        Above = ARMv7Assembler::ConditionHI,
        AboveOrEqual = ARMv7Assembler::ConditionHS,
        Below = ARMv7Assembler::ConditionLO,
        BelowOrEqual = ARMv7Assembler::ConditionLS,
        GreaterThan = ARMv7Assembler::ConditionGT,
        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        LessThan = ARMv7Assembler::ConditionLT,
        LessThanOrEqual = ARMv7Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = ARMv7Assembler::ConditionVS,
        Signed = ARMv7Assembler::ConditionMI,
        PositiveOrZero = ARMv7Assembler::ConditionPL,
        Zero = ARMv7Assembler::ConditionEQ,
        NonZero = ARMv7Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMv7Assembler::ConditionEQ,
        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARMv7Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        DoubleLessThan = ARMv7Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID framePointerRegister = ARMRegisters::fp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
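
    // Illustrative usage sketch (not part of the original header): assuming a
    // MacroAssemblerARMv7 instance named 'jit' and hypothetical JIT register
    // names regT0/regT1, a client might emit:
    //
    //   jit.add32(TrustedImm32(4), regT0);           // regT0 += 4
    //   jit.add32(regT1, regT0);                     // regT0 += regT1
    //   jit.add32(TrustedImm32(1), Address(regT0));  // 32-bit add to memory at [regT0]
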
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(AbsoluteAddress src, RegisterID dest)
    {
        load32(src.m_ptr, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);

        // For adds with stack pointer destination, moving the src first to sp is
        // needed to avoid an unpredictable instruction.
        if (dest == ARMRegisters::sp && src != dest) {
            move(src, ARMRegisters::sp);
            src = ARMRegisters::sp;
        }

        if (armImm.isValid())
            m_assembler.add(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add(dest, src, dataTempRegister);
        }
    }

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);

        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
        else {
            move(imm, addressTempRegister);
            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        }
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));

        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
        m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.ARM_and(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.ARM_and(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.ARM_and(dest, src, dataTempRegister);
        }
    }

    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz(dest, src);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.lsl(dest, src, dataTempRegister);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.neg(srcDest, srcDest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orr(dest, dest, src);
    }

    void or32(RegisterID src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        load32(addressTempRegister, dataTempRegister);
        or32(src, dataTempRegister);
        store32(dataTempRegister, addressTempRegister);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.orr(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.orr(dest, src, dataTempRegister);
        }
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.asr(dest, src, dataTempRegister);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.lsr(dest, src, dataTempRegister);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub(dest, dest, dataTempRegister);
        }
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        sub32(dataTempRegister, dest);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1) {
            m_assembler.mvn(dest, src);
            return;
        }

        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.eor(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.eor(dest, src, dataTempRegister);
        }
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn(dest, dest);
        else
            xor32(imm, dest, dest);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
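
    // Illustrative usage sketch (not part of the original header), again assuming
    // 'jit' and hypothetical registers regT0/regT1:
    //
    //   jit.load32(Address(regT0, 8), regT1);             // regT1 = [regT0 + 8]
    //   jit.store32(TrustedImm32(0), Address(regT0, 8));  // [regT0 + 8] = 0
    //   jit.load32(regT0, regT1);                         // a bare register is wrapped in an Address
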
    void load32(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldr(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
        }
    }

    void load16(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrh(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
        }
    }

    void load16Signed(ArmAddress address, RegisterID dest)
    {
        ASSERT(address.type == ArmAddress::HasIndex);
        m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
    }

    void load8(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrb(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
        }
    }

    void load8Signed(ArmAddress address, RegisterID dest)
    {
        ASSERT(address.type == ArmAddress::HasIndex);
        m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
    }

    void store32(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.str(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.str(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.str(src, address.base, address.u.offset, true, false);
        }
    }

    void store8(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.strb(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.strb(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.strb(src, address.base, address.u.offset, true, false);
        }
    }

    void store16(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.strh(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.strh(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.strh(src, address.base, address.u.offset, true, false);
        }
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(setupArmAddress(address), dest);
    }

    void load32(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm32(misc), addressTempRegister);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(address.offset >= 0 && address.offset <= 255);
        m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
        return result;
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    void load8Signed(ImplicitAddress, RegisterID)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        load8Signed(setupArmAddress(address), dest);
    }

    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        load8(dest, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        load32(ArmAddress(address.base, dataTempRegister), dest);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        RegisterID base = address.base;

        DataLabelCompact label(this);
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));

        m_assembler.ldr(dest, base, address.offset, true, false);
        return label;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        load16Signed(setupArmAddress(address), dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
        if (armImm.isValid())
            m_assembler.ldrh(dest, address.base, armImm);
        else {
            move(TrustedImm32(address.offset), dataTempRegister);
            m_assembler.ldrh(dest, address.base, dataTempRegister);
        }
    }

    void load16Signed(ImplicitAddress, RegisterID)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        store32(src, ArmAddress(address.base, dataTempRegister));
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(RegisterID src, BaseIndex address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    void store32(RegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void store8(RegisterID src, Address address)
    {
        store8(src, setupArmAddress(address));
    }

    void store8(RegisterID src, BaseIndex address)
    {
        store8(src, setupArmAddress(address));
    }

    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        store8(src, ArmAddress(addressTempRegister, 0));
    }

    void store8(TrustedImm32 imm, void* address)
    {
        move(imm, dataTempRegister);
        store8(dataTempRegister, address);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        move(imm, dataTempRegister);
        store8(dataTempRegister, address);
    }

    void store16(RegisterID src, BaseIndex address)
    {
        store16(src, setupArmAddress(address));
    }

    // Possibly clobbers src, but not on this architecture.
    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.vmov(dest1, dest2, src);
    }

    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
    {
        UNUSED_PARAM(scratch);
        m_assembler.vmov(dest, src1, src2);
    }

    static bool shouldBlindForSpecificArch(uint32_t value)
    {
        ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);

        // Couldn't be encoded as an immediate, so assume it's untrusted.
        if (!immediate.isValid())
            return true;

        // If we can encode the immediate, we have less than 16 attacker
        // controlled bits.
        if (immediate.isEncodedImm())
            return false;

        // Don't let any more than 12 bits of an instruction word
        // be controlled by an attacker.
        return !immediate.isUInt12();
    }
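
    // Hedged example of the policy above (not from the original source): a value
    // such as 0xff00ff00 fits the Thumb-2 encoded-immediate patterns, so it is not
    // blinded, whereas an arbitrary constant like 0x12345678 cannot be encoded and
    // is treated as attacker-controlled.
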
    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }
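
    // Illustrative usage sketch (not part of the original header), assuming 'jit',
    // a hypothetical base register regT0 and FP registers fpRegT0/fpRegT1:
    //
    //   jit.loadDouble(Address(regT0, 0), fpRegT0);
    //   jit.loadDouble(Address(regT0, 8), fpRegT1);
    //   jit.addDouble(fpRegT1, fpRegT0);             // fpRegT0 += fpRegT1
    //   jit.storeDouble(fpRegT0, Address(regT0, 0));
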
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vldr(dest, base, offset);
    }

    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        loadDouble(Address(addressTempRegister, address.offset), dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        loadFloat(Address(addressTempRegister, address.offset), dest);
    }

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest)
            m_assembler.vmov(dest, src);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        move(address, addressTempRegister);
        m_assembler.vldr(dest, addressTempRegister, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vstr(src, base, offset);
    }

    void storeFloat(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
    }

    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        move(address, addressTempRegister);
        storeDouble(src, addressTempRegister);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        storeDouble(src, Address(addressTempRegister, address.offset));
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        storeFloat(src, Address(addressTempRegister, address.offset));
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vadd(dest, op1, op2);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        m_assembler.vadd(dest, dest, fpTempRegister);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv(dest, dest, src);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vdiv(dest, op1, op2);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vsub(dest, op1, op2);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vmul(dest, op1, op2);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt(dest, src);
    }

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vabs(dest, src);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vneg(dest, src);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov(fpTempRegister, src, src);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        // Fixme: load directly into the fpr!
        load32(address, dataTempRegister);
        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        // Fixme: load directly into the fpr!
        load32(address.m_ptr, dataTempRegister);
        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp(left, right);
        m_assembler.vmrs();

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Convert into dest.
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Calculate 2x dest. If the value potentially underflowed, it will have
        // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
        // overflow the result will be equal to -2.
        Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
        Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));

        // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
        underflow.link(this);
        if (branchType == BranchIfTruncateSuccessful)
            return noOverflow;

        // We'll reach the current point in the code on failure, so plant a
        // jump here & link the success case.
        Jump failure = jump();
        noOverflow.link(this);
        return failure;
    }

    // Result is undefined if the value is outside of the integer range.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());
    }

    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());
    }
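
    // Worked example of the saturation check used above (an illustration, not from
    // the original source): vcvt saturates out-of-range inputs, so a truncation
    // result of 0x80000000 is suspect; doubling it via branchAdd32 yields zero
    // (the 'underflow' path), while a saturated 0x7fffffff doubles to -2, which the
    // branch32 against TrustedImm32(-2) detects as overflow.
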
    // Convert 'src' to an integer, placing the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
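
    // Illustrative usage sketch (not part of the original header), assuming 'jit'
    // and hypothetical registers regT0/regT1:
    //
    //   jit.push(regT0);              // sp -= 4; [sp] = regT0
    //   jit.push(TrustedImm32(0));    // constants go through dataTempRegister
    //   jit.pop(regT1);               // regT1 = [sp]; sp += 4
    //   jit.popPair(regT0, regT1);    // pop two registers in one instruction
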
    void pop(RegisterID dest)
    {
        m_assembler.pop(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push(src);
    }

    void push(Address address)
    {
        load32(address, dataTempRegister);
        push(dataTempRegister);
    }

    void push(TrustedImm32 imm)
    {
        move(imm, dataTempRegister);
        push(dataTempRegister);
    }

    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.pop(1 << dest1 | 1 << dest2);
    }

    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.push(1 << src1 | 1 << src2);
    }

    // Register move operations:
    //
    // Move values in registers.
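
    // Illustrative usage sketch (not part of the original header), assuming 'jit'
    // and hypothetical registers regT0/regT1:
    //
    //   jit.move(TrustedImm32(0x12345678), regT0);  // typically materialized as mov/movt
    //   jit.move(regT0, regT1);
    //   jit.swap(regT0, regT1);                     // exchanges values via dataTempRegister
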
    void move(TrustedImm32 imm, RegisterID dest)
    {
        uint32_t value = imm.m_value;

        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);

        if (armImm.isValid())
            m_assembler.mov(dest, armImm);
        else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
            m_assembler.mvn(dest, armImm);
        else {
            m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
            if (value & 0xffff0000)
                m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
        }
    }

    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov(dest, src);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, dataTempRegister);
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    void memoryFence()
    {
        m_assembler.dmbSY();
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARMv7Assembler::maxJumpReplacementSize();
    }

    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
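
    // Illustrative usage sketch (not part of the original header), assuming 'jit'
    // and a hypothetical register regT0:
    //
    //   Jump slowCase = jit.branch32(GreaterThan, regT0, TrustedImm32(5));
    //   // ... fast path ...
    //   slowCase.link(&jit);          // bind the branch target later
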
    // Should we be using TEQ for equal/not-equal?
    void compare32(RegisterID left, TrustedImm32 right)
    {
        int32_t imm = right.m_value;
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
        if (armImm.isValid())
            m_assembler.cmp(left, armImm);
        else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
            m_assembler.cmn(left, armImm);
        else {
            move(TrustedImm32(imm), dataTempRegister);
            m_assembler.cmp(left, dataTempRegister);
        }
    }

    void test32(RegisterID reg, TrustedImm32 mask)
    {
        int32_t imm = mask.m_value;

        if (imm == -1)
            m_assembler.tst(reg, reg);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid()) {
                if (reg == ARMRegisters::sp) {
                    move(reg, addressTempRegister);
                    m_assembler.tst(addressTempRegister, armImm);
                } else
                    m_assembler.tst(reg, armImm);
            } else {
                move(mask, dataTempRegister);
                if (reg == ARMRegisters::sp) {
                    move(reg, addressTempRegister);
                    m_assembler.tst(addressTempRegister, dataTempRegister);
                } else
                    m_assembler.tst(reg, dataTempRegister);
            }
        }
    }

    void test32(ResultCondition, RegisterID reg, TrustedImm32 mask)
    {
        test32(reg, mask);
    }

    Jump branch(ResultCondition cond)
    {
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32WithUnalignedHalfWords(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left.m_ptr, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        // Use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
        load8(left, addressTempRegister);
        return branch8(cond, addressTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load8(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
    {
        // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        load8(Address(addressTempRegister), addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        load8(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        load8(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        load8(Address(addressTempRegister), addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        load32(address, dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }

    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), dataTempRegister);
        load32(Address(dataTempRegister), dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }

    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
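
    // Illustrative usage sketch (not part of the original header), assuming 'jit'
    // and hypothetical registers regT0/regT1:
    //
    //   Jump overflowed = jit.branchAdd32(Overflow, regT1, regT0);      // regT0 += regT1
    //   Jump hitZero = jit.branchSub32(Zero, TrustedImm32(1), regT0);   // regT0 -= 1
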
    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add_S(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dest, op1, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add_S(dest, op1, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, dest, src, dest);
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        return branchAdd32(cond, dest, dataTempRegister, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd32(cond, dest, imm, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        // Move the high bits of the address into addressTempRegister,
        // and load the value into dataTempRegister.
        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));

        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
        else {
            // If the operand does not fit into an immediate then load it temporarily
            // into addressTempRegister; since we're overwriting addressTempRegister
            // we'll need to reload it with the high bits of the address afterwards.
            move(imm, addressTempRegister);
            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
            move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        }

        // Store the result.
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));

        return Jump(makeBranch(cond));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, src1, src2);

        if (cond == Overflow) {
            m_assembler.asr(addressTempRegister, dest, 31);
            return branch32(NotEqual, addressTempRegister, dataTempRegister);
        }

        return branchTest32(cond, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, src, dest, dest);
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        return branchMul32(cond, dataTempRegister, src, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
        m_assembler.sub_S(srcDest, zero, srcDest);
        return Jump(makeBranch(cond));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        m_assembler.orr_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub_S(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub_S(dest, op1, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub_S(dest, op1, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }

    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);

        // dataTempRegister will point after the jump if index register contains zero
        move(ARMRegisters::pc, dataTempRegister);
        m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));

        ShiftTypeAndAmount shift(SRType_LSL, scale);
        m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
        jump(dataTempRegister);
    }

    // Miscellaneous operations:

    void breakpoint(uint8_t imm = 0)
    {
        m_assembler.bkpt(imm);
    }

    ALWAYS_INLINE Call nearCall()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
    }

    ALWAYS_INLINE Call call()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
    }

    ALWAYS_INLINE Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    ALWAYS_INLINE Call call(Address address)
    {
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::None);
    }

    ALWAYS_INLINE void ret()
    {
        m_assembler.bx(linkRegister);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, dataTempRegister);
        compare32(cond, dataTempRegister, right, dest);
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, addressTempRegister);
        compare32(cond, addressTempRegister, right, dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
    {
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }

    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
    {
        moveFixedWidthEncoding(TrustedImm32(imm), dst);
        return DataLabelPtr(this);
    }

    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }

    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }

    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, TrustedImm32(right));
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableJump()
    {
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        return label;
    }
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }

    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
    }

    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
    }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const unsigned twoWordOpSize = 4;
        return label.labelAtOffset(-twoWordOpSize * 2);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
    {
#if OS(LINUX) || OS(QNX)
        ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
#else
        UNUSED_PARAM(rd);
        ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
#endif
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

#if USE(MASM_PROBE)
    struct CPUState {
        #define DECLARE_REGISTER(_type, _regName) \
            _type _regName;
        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER
    };

    struct ProbeContext;
    typedef void (*ProbeFunction)(struct ProbeContext*);

    struct ProbeContext {
        ProbeFunction probeFunction;
        void* arg1;
        void* arg2;
        CPUState cpu;

        void dump(const char* indentation = 0);
        void dumpCPURegisters(const char* indentation);
    };

    // For details about probe(), see comment in MacroAssemblerX86_64.h.
    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
#endif // USE(MASM_PROBE)

    ALWAYS_INLINE Jump jump()
    {
        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
    }

    ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
    }
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }

    ArmAddress setupArmAddress(BaseIndex address)
    {
        if (address.offset) {
            ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
            if (imm.isValid())
                m_assembler.add(addressTempRegister, address.base, imm);
            else {
                move(TrustedImm32(address.offset), addressTempRegister);
                m_assembler.add(addressTempRegister, addressTempRegister, address.base);
            }

            return ArmAddress(addressTempRegister, address.index, address.scale);
        }
        return ArmAddress(address.base, address.index, address.scale);
    }

    ArmAddress setupArmAddress(Address address)
    {
        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
            return ArmAddress(address.base, address.offset);

        move(TrustedImm32(address.offset), addressTempRegister);
        return ArmAddress(address.base, addressTempRegister);
    }

    ArmAddress setupArmAddress(ImplicitAddress address)
    {
        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
            return ArmAddress(address.base, address.offset);

        move(TrustedImm32(address.offset), addressTempRegister);
        return ArmAddress(address.base, addressTempRegister);
    }

    RegisterID makeBaseIndexBase(BaseIndex address)
    {
        if (!address.offset)
            return address.base;

        ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
        if (imm.isValid())
            m_assembler.add(addressTempRegister, address.base, imm);
        else {
            move(TrustedImm32(address.offset), addressTempRegister);
            m_assembler.add(addressTempRegister, addressTempRegister, address.base);
        }

        return addressTempRegister;
    }

    void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
    {
        uint32_t value = imm.m_value;
        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
    }

    ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMv7Assembler::linkCall(code, call.m_label, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

#if USE(MASM_PROBE)
    inline TrustedImm32 trustedImm32FromPtr(void* ptr)
    {
        return TrustedImm32(TrustedImmPtr(ptr));
    }

    inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
    {
        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }

    inline TrustedImm32 trustedImm32FromPtr(void (*function)())
    {
        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }
#endif

    bool m_makeJumpPatchable;
};

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerARMv7_h