2 * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
39 namespace ARMRegisters
{
48 r7
, wr
= r7
, // thumb work register
50 r9
, sb
= r9
, // static base
51 r10
, sl
= r10
, // stack limit
52 r11
, fp
= r11
, // frame pointer
127 } FPDoubleRegisterID
;
164 inline FPSingleRegisterID
asSingle(FPDoubleRegisterID reg
)
167 return (FPSingleRegisterID
)(reg
<< 1);
170 inline FPDoubleRegisterID
asDouble(FPSingleRegisterID reg
)
173 return (FPDoubleRegisterID
)(reg
>> 1);
177 class ARMv7Assembler
;
178 class ARMThumbImmediate
{
179 friend class ARMv7Assembler
;
181 typedef uint8_t ThumbImmediateType
;
182 static const ThumbImmediateType TypeInvalid
= 0;
183 static const ThumbImmediateType TypeEncoded
= 1;
184 static const ThumbImmediateType TypeUInt16
= 2;
194 // If this is an encoded immediate, then it may describe a shift, or a pattern.
196 unsigned shiftValue7
: 7;
197 unsigned shiftAmount
: 5;
200 unsigned immediate
: 8;
201 unsigned pattern
: 4;
203 } ThumbImmediateValue
;
205 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
// Binary-search step for countLeadingZeros. N is half the width of the window
// currently being examined. If any of the top N bits (of 2N bits) are set,
// discard the bottom N bits; otherwise we have identified N more leading
// zeros. These functions are tiny; inlining is left to the compiler.
static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
{
    if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
        value >>= N;             /* if any were set, lose the bottom N */
    else                         /* if none of the top N bits are set, */
        zeros += N;              /* then we have identified N leading zeros */
}

// Counts the number of leading zero bits in a 32-bit value using five
// halving steps (16, 8, 4, 2, 1). An input of zero is handled up front:
// without the guard the fall-through steps would report 31 instead of 32.
static int32_t countLeadingZeros(uint32_t value)
{
    if (!value)
        return 32;

    int32_t zeros = 0;
    countLeadingZerosPartial(value, zeros, 16);
    countLeadingZerosPartial(value, zeros, 8);
    countLeadingZerosPartial(value, zeros, 4);
    countLeadingZerosPartial(value, zeros, 2);
    countLeadingZerosPartial(value, zeros, 1);
    return zeros;
}
239 : m_type(TypeInvalid
)
244 ARMThumbImmediate(ThumbImmediateType type
, ThumbImmediateValue value
)
250 ARMThumbImmediate(ThumbImmediateType type
, uint16_t value
)
253 // Make sure this constructor is only reached with type TypeUInt16;
254 // this extra parameter makes the code a little clearer by making it
255 // explicit at call sites which type is being constructed
256 ASSERT_UNUSED(type
, type
== TypeUInt16
);
258 m_value
.asInt
= value
;
262 static ARMThumbImmediate
makeEncodedImm(uint32_t value
)
264 ThumbImmediateValue encoding
;
267 // okay, these are easy.
269 encoding
.immediate
= value
;
270 encoding
.pattern
= 0;
271 return ARMThumbImmediate(TypeEncoded
, encoding
);
274 int32_t leadingZeros
= countLeadingZeros(value
);
275 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
276 ASSERT(leadingZeros
< 24);
278 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
279 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
280 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
281 int32_t rightShiftAmount
= 24 - leadingZeros
;
282 if (value
== ((value
>> rightShiftAmount
) << rightShiftAmount
)) {
283 // Shift the value down to the low byte position. The assign to
284 // shiftValue7 drops the implicit top bit.
285 encoding
.shiftValue7
= value
>> rightShiftAmount
;
286 // The endoded shift amount is the magnitude of a right rotate.
287 encoding
.shiftAmount
= 8 + leadingZeros
;
288 return ARMThumbImmediate(TypeEncoded
, encoding
);
294 if ((bytes
.byte0
== bytes
.byte1
) && (bytes
.byte0
== bytes
.byte2
) && (bytes
.byte0
== bytes
.byte3
)) {
295 encoding
.immediate
= bytes
.byte0
;
296 encoding
.pattern
= 3;
297 return ARMThumbImmediate(TypeEncoded
, encoding
);
300 if ((bytes
.byte0
== bytes
.byte2
) && !(bytes
.byte1
| bytes
.byte3
)) {
301 encoding
.immediate
= bytes
.byte0
;
302 encoding
.pattern
= 1;
303 return ARMThumbImmediate(TypeEncoded
, encoding
);
306 if ((bytes
.byte1
== bytes
.byte3
) && !(bytes
.byte0
| bytes
.byte2
)) {
307 encoding
.immediate
= bytes
.byte1
;
308 encoding
.pattern
= 2;
309 return ARMThumbImmediate(TypeEncoded
, encoding
);
312 return ARMThumbImmediate();
315 static ARMThumbImmediate
makeUInt12(int32_t value
)
317 return (!(value
& 0xfffff000))
318 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
319 : ARMThumbImmediate();
322 static ARMThumbImmediate
makeUInt12OrEncodedImm(int32_t value
)
324 // If this is not a 12-bit unsigned it, try making an encoded immediate.
325 return (!(value
& 0xfffff000))
326 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
327 : makeEncodedImm(value
);
330 // The 'make' methods, above, return a !isValid() value if the argument
331 // cannot be represented as the requested type. This methods is called
332 // 'get' since the argument can always be represented.
333 static ARMThumbImmediate
makeUInt16(uint16_t value
)
335 return ARMThumbImmediate(TypeUInt16
, value
);
340 return m_type
!= TypeInvalid
;
343 uint16_t asUInt16() const { return m_value
.asInt
; }
345 // These methods rely on the format of encoded byte values.
346 bool isUInt3() { return !(m_value
.asInt
& 0xfff8); }
347 bool isUInt4() { return !(m_value
.asInt
& 0xfff0); }
348 bool isUInt5() { return !(m_value
.asInt
& 0xffe0); }
349 bool isUInt6() { return !(m_value
.asInt
& 0xffc0); }
350 bool isUInt7() { return !(m_value
.asInt
& 0xff80); }
351 bool isUInt8() { return !(m_value
.asInt
& 0xff00); }
352 bool isUInt9() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfe00); }
353 bool isUInt10() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfc00); }
354 bool isUInt12() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xf000); }
355 bool isUInt16() { return m_type
== TypeUInt16
; }
356 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value
.asInt
; }
357 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value
.asInt
; }
358 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value
.asInt
; }
359 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value
.asInt
; }
360 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value
.asInt
; }
361 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value
.asInt
; }
362 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value
.asInt
; }
363 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value
.asInt
; }
364 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value
.asInt
; }
365 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value
.asInt
; }
367 bool isEncodedImm() { return m_type
== TypeEncoded
; }
// Which 'make' factory produced this immediate (TypeInvalid if none succeeded).
370 ThumbImmediateType m_type
;
// Encoded payload; interpretation depends on m_type.
371 ThumbImmediateValue m_value
;
380 SRType_RRX
= SRType_ROR
383 class ShiftTypeAndAmount
{
384 friend class ARMv7Assembler
;
389 m_u
.type
= (ARMShiftType
)0;
393 ShiftTypeAndAmount(ARMShiftType type
, unsigned amount
)
396 m_u
.amount
= amount
& 31;
399 unsigned lo4() { return m_u
.lo4
; }
400 unsigned hi4() { return m_u
.hi4
; }
415 class ARMv7Assembler
{
417 typedef ARMRegisters::RegisterID RegisterID
;
418 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID
;
419 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID
;
420 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID
;
422 // (HS, LO, HI, LS) -> (AE, B, A, BE)
423 // (VS, VC) -> (O, NO)
425 ConditionEQ
, // Zero / Equal.
426 ConditionNE
, // Non-zero / Not equal.
427 ConditionHS
, ConditionCS
= ConditionHS
, // Unsigned higher or same.
428 ConditionLO
, ConditionCC
= ConditionLO
, // Unsigned lower.
429 ConditionMI
, // Negative.
430 ConditionPL
, // Positive or zero.
431 ConditionVS
, // Overflowed.
432 ConditionVC
, // Not overflowed.
433 ConditionHI
, // Unsigned higher.
434 ConditionLS
, // Unsigned lower or same.
435 ConditionGE
, // Signed greater than or equal.
436 ConditionLT
, // Signed less than.
437 ConditionGT
, // Signed greater than.
438 ConditionLE
, // Signed less than or equal.
439 ConditionAL
, // Unconditional / Always execute.
443 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
444 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
445 enum JumpType
{ JumpFixed
= JUMP_ENUM_WITH_SIZE(0, 0),
446 JumpNoCondition
= JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
447 JumpCondition
= JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
448 JumpNoConditionFixedSize
= JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
449 JumpConditionFixedSize
= JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
452 LinkInvalid
= JUMP_ENUM_WITH_SIZE(0, 0),
453 LinkJumpT1
= JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
454 LinkJumpT2
= JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
455 LinkJumpT3
= JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
456 LinkJumpT4
= JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
457 LinkConditionalJumpT4
= JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
458 LinkBX
= JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
459 LinkConditionalBX
= JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
464 LinkRecord(intptr_t from
, intptr_t to
, JumpType type
, Condition condition
)
466 data
.realTypes
.m_from
= from
;
467 data
.realTypes
.m_to
= to
;
468 data
.realTypes
.m_type
= type
;
469 data
.realTypes
.m_linkType
= LinkInvalid
;
470 data
.realTypes
.m_condition
= condition
;
472 void operator=(const LinkRecord
& other
)
474 data
.copyTypes
.content
[0] = other
.data
.copyTypes
.content
[0];
475 data
.copyTypes
.content
[1] = other
.data
.copyTypes
.content
[1];
476 data
.copyTypes
.content
[2] = other
.data
.copyTypes
.content
[2];
478 intptr_t from() const { return data
.realTypes
.m_from
; }
479 void setFrom(intptr_t from
) { data
.realTypes
.m_from
= from
; }
480 intptr_t to() const { return data
.realTypes
.m_to
; }
481 JumpType
type() const { return data
.realTypes
.m_type
; }
482 JumpLinkType
linkType() const { return data
.realTypes
.m_linkType
; }
483 void setLinkType(JumpLinkType linkType
) { ASSERT(data
.realTypes
.m_linkType
== LinkInvalid
); data
.realTypes
.m_linkType
= linkType
; }
484 Condition
condition() const { return data
.realTypes
.m_condition
; }
488 intptr_t m_from
: 31;
491 JumpLinkType m_linkType
: 8;
492 Condition m_condition
: 16;
497 COMPILE_ASSERT(sizeof(RealTypes
) == sizeof(CopyTypes
), LinkRecordCopyStructSizeEqualsRealStruct
);
502 : m_indexOfLastWatchpoint(INT_MIN
)
503 , m_indexOfTailOfLastWatchpoint(INT_MIN
)
510 static bool BadReg(RegisterID reg
)
512 return (reg
== ARMRegisters::sp
) || (reg
== ARMRegisters::pc
);
515 uint32_t singleRegisterMask(FPSingleRegisterID rdNum
, int highBitsShift
, int lowBitShift
)
517 uint32_t rdMask
= (rdNum
>> 1) << highBitsShift
;
519 rdMask
|= 1 << lowBitShift
;
523 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum
, int highBitShift
, int lowBitsShift
)
525 uint32_t rdMask
= (rdNum
& 0xf) << lowBitsShift
;
527 rdMask
|= 1 << highBitShift
;
532 OP_ADD_reg_T1
= 0x1800,
533 OP_SUB_reg_T1
= 0x1A00,
534 OP_ADD_imm_T1
= 0x1C00,
535 OP_SUB_imm_T1
= 0x1E00,
536 OP_MOV_imm_T1
= 0x2000,
537 OP_CMP_imm_T1
= 0x2800,
538 OP_ADD_imm_T2
= 0x3000,
539 OP_SUB_imm_T2
= 0x3800,
540 OP_AND_reg_T1
= 0x4000,
541 OP_EOR_reg_T1
= 0x4040,
542 OP_TST_reg_T1
= 0x4200,
543 OP_RSB_imm_T1
= 0x4240,
544 OP_CMP_reg_T1
= 0x4280,
545 OP_ORR_reg_T1
= 0x4300,
546 OP_MVN_reg_T1
= 0x43C0,
547 OP_ADD_reg_T2
= 0x4400,
548 OP_MOV_reg_T1
= 0x4600,
551 OP_STR_reg_T1
= 0x5000,
552 OP_STRH_reg_T1
= 0x5200,
553 OP_STRB_reg_T1
= 0x5400,
554 OP_LDRSB_reg_T1
= 0x5600,
555 OP_LDR_reg_T1
= 0x5800,
556 OP_LDRH_reg_T1
= 0x5A00,
557 OP_LDRB_reg_T1
= 0x5C00,
558 OP_LDRSH_reg_T1
= 0x5E00,
559 OP_STR_imm_T1
= 0x6000,
560 OP_LDR_imm_T1
= 0x6800,
561 OP_STRB_imm_T1
= 0x7000,
562 OP_LDRB_imm_T1
= 0x7800,
563 OP_STRH_imm_T1
= 0x8000,
564 OP_LDRH_imm_T1
= 0x8800,
565 OP_STR_imm_T2
= 0x9000,
566 OP_LDR_imm_T2
= 0x9800,
567 OP_ADD_SP_imm_T1
= 0xA800,
568 OP_ADD_SP_imm_T2
= 0xB000,
569 OP_SUB_SP_imm_T1
= 0xB080,
578 OP_AND_reg_T2
= 0xEA00,
579 OP_TST_reg_T2
= 0xEA10,
580 OP_ORR_reg_T2
= 0xEA40,
581 OP_ORR_S_reg_T2
= 0xEA50,
582 OP_ASR_imm_T1
= 0xEA4F,
583 OP_LSL_imm_T1
= 0xEA4F,
584 OP_LSR_imm_T1
= 0xEA4F,
585 OP_ROR_imm_T1
= 0xEA4F,
586 OP_MVN_reg_T2
= 0xEA6F,
587 OP_EOR_reg_T2
= 0xEA80,
588 OP_ADD_reg_T3
= 0xEB00,
589 OP_ADD_S_reg_T3
= 0xEB10,
590 OP_SUB_reg_T2
= 0xEBA0,
591 OP_SUB_S_reg_T2
= 0xEBB0,
592 OP_CMP_reg_T2
= 0xEBB0,
593 OP_VMOV_CtoD
= 0xEC00,
594 OP_VMOV_DtoC
= 0xEC10,
599 OP_VMOV_CtoS
= 0xEE00,
600 OP_VMOV_StoC
= 0xEE10,
607 OP_VCVT_FPIVFP
= 0xEEB0,
609 OP_VMOV_IMM_T2
= 0xEEB0,
612 OP_VSQRT_T1
= 0xEEB0,
613 OP_VCVTSD_T1
= 0xEEB0,
614 OP_VCVTDS_T1
= 0xEEB0,
617 OP_AND_imm_T1
= 0xF000,
619 OP_ORR_imm_T1
= 0xF040,
620 OP_MOV_imm_T2
= 0xF040,
622 OP_EOR_imm_T1
= 0xF080,
623 OP_ADD_imm_T3
= 0xF100,
624 OP_ADD_S_imm_T3
= 0xF110,
627 OP_SUB_imm_T3
= 0xF1A0,
628 OP_SUB_S_imm_T3
= 0xF1B0,
629 OP_CMP_imm_T2
= 0xF1B0,
630 OP_RSB_imm_T2
= 0xF1C0,
631 OP_RSB_S_imm_T2
= 0xF1D0,
632 OP_ADD_imm_T4
= 0xF200,
633 OP_MOV_imm_T3
= 0xF240,
634 OP_SUB_imm_T4
= 0xF2A0,
638 OP_STRB_imm_T3
= 0xF800,
639 OP_STRB_reg_T2
= 0xF800,
640 OP_LDRB_imm_T3
= 0xF810,
641 OP_LDRB_reg_T2
= 0xF810,
642 OP_STRH_imm_T3
= 0xF820,
643 OP_STRH_reg_T2
= 0xF820,
644 OP_LDRH_reg_T2
= 0xF830,
645 OP_LDRH_imm_T3
= 0xF830,
646 OP_STR_imm_T4
= 0xF840,
647 OP_STR_reg_T2
= 0xF840,
648 OP_LDR_imm_T4
= 0xF850,
649 OP_LDR_reg_T2
= 0xF850,
650 OP_STRB_imm_T2
= 0xF880,
651 OP_LDRB_imm_T2
= 0xF890,
652 OP_STRH_imm_T2
= 0xF8A0,
653 OP_LDRH_imm_T2
= 0xF8B0,
654 OP_STR_imm_T3
= 0xF8C0,
655 OP_LDR_imm_T3
= 0xF8D0,
656 OP_LDRSB_reg_T2
= 0xF910,
657 OP_LDRSH_reg_T2
= 0xF930,
658 OP_LSL_reg_T2
= 0xFA00,
659 OP_LSR_reg_T2
= 0xFA20,
660 OP_ASR_reg_T2
= 0xFA40,
661 OP_ROR_reg_T2
= 0xFA60,
663 OP_SMULL_T1
= 0xFB80,
664 #if CPU(APPLE_ARMV7S)
671 OP_VADD_T2b
= 0x0A00,
675 OP_VMOV_IMM_T2b
= 0x0A00,
676 OP_VMOV_T2b
= 0x0A40,
677 OP_VMUL_T2b
= 0x0A00,
680 OP_VMOV_StoCb
= 0x0A10,
681 OP_VMOV_CtoSb
= 0x0A10,
682 OP_VMOV_DtoCb
= 0x0A10,
683 OP_VMOV_CtoDb
= 0x0A10,
685 OP_VABS_T2b
= 0x0A40,
687 OP_VCVT_FPIVFPb
= 0x0A40,
688 OP_VNEG_T2b
= 0x0A40,
689 OP_VSUB_T2b
= 0x0A40,
690 OP_VSQRT_T1b
= 0x0A40,
691 OP_VCVTSD_T1b
= 0x0A40,
692 OP_VCVTDS_T1b
= 0x0A40,
699 FourFours(unsigned f3
, unsigned f2
, unsigned f1
, unsigned f0
)
718 class ARMInstructionFormatter
;
721 bool ifThenElseConditionBit(Condition condition
, bool isIf
)
723 return isIf
? (condition
& 1) : !(condition
& 1);
725 uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
, bool inst4if
)
727 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
728 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
729 | (ifThenElseConditionBit(condition
, inst4if
) << 1)
731 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
732 return (condition
<< 4) | mask
;
734 uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
)
736 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
737 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
739 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
740 return (condition
<< 4) | mask
;
742 uint8_t ifThenElse(Condition condition
, bool inst2if
)
744 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
746 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
747 return (condition
<< 4) | mask
;
750 uint8_t ifThenElse(Condition condition
)
753 return (condition
<< 4) | mask
;
758 void adc(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
760 // Rd can only be SP if Rn is also SP.
761 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
762 ASSERT(rd
!= ARMRegisters::pc
);
763 ASSERT(rn
!= ARMRegisters::pc
);
764 ASSERT(imm
.isEncodedImm());
766 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm
, rn
, rd
, imm
);
769 void add(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
771 // Rd can only be SP if Rn is also SP.
772 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
773 ASSERT(rd
!= ARMRegisters::pc
);
774 ASSERT(rn
!= ARMRegisters::pc
);
775 ASSERT(imm
.isValid());
777 if (rn
== ARMRegisters::sp
) {
778 ASSERT(!(imm
.getUInt16() & 3));
779 if (!(rd
& 8) && imm
.isUInt10()) {
780 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1
, rd
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
782 } else if ((rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
783 m_formatter
.oneWordOp9Imm7(OP_ADD_SP_imm_T2
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
786 } else if (!((rd
| rn
) & 8)) {
788 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
790 } else if ((rd
== rn
) && imm
.isUInt8()) {
791 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
796 if (imm
.isEncodedImm())
797 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3
, rn
, rd
, imm
);
799 ASSERT(imm
.isUInt12());
800 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4
, rn
, rd
, imm
);
804 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
806 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
807 ASSERT(rd
!= ARMRegisters::pc
);
808 ASSERT(rn
!= ARMRegisters::pc
);
810 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
813 // NOTE: In an IT block, add doesn't modify the flags register.
814 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
)
817 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rm
, rd
);
819 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rn
, rd
);
820 else if (!((rd
| rn
| rm
) & 8))
821 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
823 add(rd
, rn
, rm
, ShiftTypeAndAmount());
826 // Not allowed in an IT (if then) block.
827 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
829 // Rd can only be SP if Rn is also SP.
830 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
831 ASSERT(rd
!= ARMRegisters::pc
);
832 ASSERT(rn
!= ARMRegisters::pc
);
833 ASSERT(imm
.isEncodedImm());
835 if (!((rd
| rn
) & 8)) {
837 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
839 } else if ((rd
== rn
) && imm
.isUInt8()) {
840 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
845 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3
, rn
, rd
, imm
);
848 // Not allowed in an IT (if then) block?
849 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
851 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
852 ASSERT(rd
!= ARMRegisters::pc
);
853 ASSERT(rn
!= ARMRegisters::pc
);
855 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
858 // Not allowed in an IT (if then) block.
859 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
861 if (!((rd
| rn
| rm
) & 8))
862 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
864 add_S(rd
, rn
, rm
, ShiftTypeAndAmount());
867 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
871 ASSERT(imm
.isEncodedImm());
872 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1
, rn
, rd
, imm
);
875 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
880 m_formatter
.twoWordOp12Reg4FourFours(OP_AND_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
883 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
)
885 if ((rd
== rn
) && !((rd
| rm
) & 8))
886 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rm
, rd
);
887 else if ((rd
== rm
) && !((rd
| rn
) & 8))
888 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rn
, rd
);
890 ARM_and(rd
, rn
, rm
, ShiftTypeAndAmount());
893 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
897 ShiftTypeAndAmount
shift(SRType_ASR
, shiftAmount
);
898 m_formatter
.twoWordOp16FourFours(OP_ASR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
901 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
906 m_formatter
.twoWordOp12Reg4FourFours(OP_ASR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
909 // Only allowed in IT (if then) block if last instruction.
910 ALWAYS_INLINE AssemblerLabel
b()
912 m_formatter
.twoWordOp16Op16(OP_B_T4a
, OP_B_T4b
);
913 return m_formatter
.label();
916 // Only allowed in IT (if then) block if last instruction.
917 ALWAYS_INLINE AssemblerLabel
blx(RegisterID rm
)
919 ASSERT(rm
!= ARMRegisters::pc
);
920 m_formatter
.oneWordOp8RegReg143(OP_BLX
, rm
, (RegisterID
)8);
921 return m_formatter
.label();
924 // Only allowed in IT (if then) block if last instruction.
925 ALWAYS_INLINE AssemblerLabel
bx(RegisterID rm
)
927 m_formatter
.oneWordOp8RegReg143(OP_BX
, rm
, (RegisterID
)0);
928 return m_formatter
.label();
931 void bkpt(uint8_t imm
= 0)
933 m_formatter
.oneWordOp8Imm8(OP_BKPT
, imm
);
936 ALWAYS_INLINE
void clz(RegisterID rd
, RegisterID rm
)
940 m_formatter
.twoWordOp12Reg4FourFours(OP_CLZ
, rm
, FourFours(0xf, rd
, 8, rm
));
943 ALWAYS_INLINE
void cmn(RegisterID rn
, ARMThumbImmediate imm
)
945 ASSERT(rn
!= ARMRegisters::pc
);
946 ASSERT(imm
.isEncodedImm());
948 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm
, rn
, (RegisterID
)0xf, imm
);
951 ALWAYS_INLINE
void cmp(RegisterID rn
, ARMThumbImmediate imm
)
953 ASSERT(rn
!= ARMRegisters::pc
);
954 ASSERT(imm
.isEncodedImm());
956 if (!(rn
& 8) && imm
.isUInt8())
957 m_formatter
.oneWordOp5Reg3Imm8(OP_CMP_imm_T1
, rn
, imm
.getUInt8());
959 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2
, rn
, (RegisterID
)0xf, imm
);
962 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
964 ASSERT(rn
!= ARMRegisters::pc
);
966 m_formatter
.twoWordOp12Reg4FourFours(OP_CMP_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
969 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
)
972 cmp(rn
, rm
, ShiftTypeAndAmount());
974 m_formatter
.oneWordOp10Reg3Reg3(OP_CMP_reg_T1
, rm
, rn
);
977 // xor is not spelled with an 'e'. :-(
978 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
982 ASSERT(imm
.isEncodedImm());
983 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1
, rn
, rd
, imm
);
986 // xor is not spelled with an 'e'. :-(
987 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
992 m_formatter
.twoWordOp12Reg4FourFours(OP_EOR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
995 // xor is not spelled with an 'e'. :-(
996 void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
)
998 if ((rd
== rn
) && !((rd
| rm
) & 8))
999 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rm
, rd
);
1000 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1001 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rn
, rd
);
1003 eor(rd
, rn
, rm
, ShiftTypeAndAmount());
1006 ALWAYS_INLINE
void it(Condition cond
)
1008 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
));
1011 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
)
1013 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
));
1016 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
)
1018 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
));
1021 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
, bool inst4if
)
1023 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
, inst4if
));
1026 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1027 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1029 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1030 ASSERT(imm
.isUInt12());
1032 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1033 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1034 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1035 m_formatter
.oneWordOp5Reg3Imm8(OP_LDR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1037 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, imm
.getUInt12());
1040 ALWAYS_INLINE
void ldrWide8BitImmediate(RegisterID rt
, RegisterID rn
, uint8_t immediate
)
1042 ASSERT(rn
!= ARMRegisters::pc
);
1043 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, immediate
);
1046 ALWAYS_INLINE
void ldrCompact(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1048 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1049 ASSERT(imm
.isUInt7());
1050 ASSERT(!((rt
| rn
) & 8));
1051 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1054 // If index is set, this is a regular offset or a pre-indexed load;
1055 // if index is not set then is is a post-index load.
1057 // If wback is set rn is updated - this is a pre or post index load,
1058 // if wback is not set this is a regular offset memory access.
1060 // (-255 <= offset <= 255)
1062 // _tmp = _reg + offset
1063 // MEM[index ? _tmp : _reg] = REG[rt]
1064 // if (wback) REG[rn] = _tmp
1065 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1067 ASSERT(rt
!= ARMRegisters::pc
);
1068 ASSERT(rn
!= ARMRegisters::pc
);
1069 ASSERT(index
|| wback
);
1070 ASSERT(!wback
| (rt
!= rn
));
1077 ASSERT((offset
& ~0xff) == 0);
1079 offset
|= (wback
<< 8);
1080 offset
|= (add
<< 9);
1081 offset
|= (index
<< 10);
1082 offset
|= (1 << 11);
1084 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4
, rn
, rt
, offset
);
1087 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1088 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1090 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1091 ASSERT(!BadReg(rm
));
1094 if (!shift
&& !((rt
| rn
| rm
) & 8))
1095 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1
, rm
, rn
, rt
);
1097 m_formatter
.twoWordOp12Reg4FourFours(OP_LDR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1100 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1101 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1103 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1104 ASSERT(imm
.isUInt12());
1106 if (!((rt
| rn
) & 8) && imm
.isUInt6())
1107 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1
, imm
.getUInt6() >> 2, rn
, rt
);
1109 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1112 // If index is set, this is a regular offset or a pre-indexed load;
1113 // if index is not set then is is a post-index load.
1115 // If wback is set rn is updated - this is a pre or post index load,
1116 // if wback is not set this is a regular offset memory access.
1118 // (-255 <= offset <= 255)
1120 // _tmp = _reg + offset
1121 // MEM[index ? _tmp : _reg] = REG[rt]
1122 // if (wback) REG[rn] = _tmp
1123 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1125 ASSERT(rt
!= ARMRegisters::pc
);
1126 ASSERT(rn
!= ARMRegisters::pc
);
1127 ASSERT(index
|| wback
);
1128 ASSERT(!wback
| (rt
!= rn
));
1135 ASSERT((offset
& ~0xff) == 0);
1137 offset
|= (wback
<< 8);
1138 offset
|= (add
<< 9);
1139 offset
|= (index
<< 10);
1140 offset
|= (1 << 11);
1142 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3
, rn
, rt
, offset
);
1145 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1147 ASSERT(!BadReg(rt
)); // Memory hint
1148 ASSERT(rn
!= ARMRegisters::pc
); // LDRH (literal)
1149 ASSERT(!BadReg(rm
));
1152 if (!shift
&& !((rt
| rn
| rm
) & 8))
1153 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1
, rm
, rn
, rt
);
1155 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1158 void ldrb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1160 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1161 ASSERT(imm
.isUInt12());
1163 if (!((rt
| rn
) & 8) && imm
.isUInt5())
1164 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1
, imm
.getUInt5(), rn
, rt
);
1166 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1169 void ldrb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1171 ASSERT(rt
!= ARMRegisters::pc
);
1172 ASSERT(rn
!= ARMRegisters::pc
);
1173 ASSERT(index
|| wback
);
1174 ASSERT(!wback
| (rt
!= rn
));
1182 ASSERT(!(offset
& ~0xff));
1184 offset
|= (wback
<< 8);
1185 offset
|= (add
<< 9);
1186 offset
|= (index
<< 10);
1187 offset
|= (1 << 11);
1189 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3
, rn
, rt
, offset
);
1192 ALWAYS_INLINE
void ldrb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1194 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1195 ASSERT(!BadReg(rm
));
1198 if (!shift
&& !((rt
| rn
| rm
) & 8))
1199 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1
, rm
, rn
, rt
);
1201 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1204 void ldrsb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1206 ASSERT(rn
!= ARMRegisters::pc
);
1207 ASSERT(!BadReg(rm
));
1210 if (!shift
&& !((rt
| rn
| rm
) & 8))
1211 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1
, rm
, rn
, rt
);
1213 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1216 void ldrsh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1218 ASSERT(rn
!= ARMRegisters::pc
);
1219 ASSERT(!BadReg(rm
));
1222 if (!shift
&& !((rt
| rn
| rm
) & 8))
1223 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1
, rm
, rn
, rt
);
1225 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1228 void lsl(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1230 ASSERT(!BadReg(rd
));
1231 ASSERT(!BadReg(rm
));
1232 ShiftTypeAndAmount
shift(SRType_LSL
, shiftAmount
);
1233 m_formatter
.twoWordOp16FourFours(OP_LSL_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1236 ALWAYS_INLINE
void lsl(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1238 ASSERT(!BadReg(rd
));
1239 ASSERT(!BadReg(rn
));
1240 ASSERT(!BadReg(rm
));
1241 m_formatter
.twoWordOp12Reg4FourFours(OP_LSL_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1244 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1246 ASSERT(!BadReg(rd
));
1247 ASSERT(!BadReg(rm
));
1248 ShiftTypeAndAmount
shift(SRType_LSR
, shiftAmount
);
1249 m_formatter
.twoWordOp16FourFours(OP_LSR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1252 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1254 ASSERT(!BadReg(rd
));
1255 ASSERT(!BadReg(rn
));
1256 ASSERT(!BadReg(rm
));
1257 m_formatter
.twoWordOp12Reg4FourFours(OP_LSR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1260 ALWAYS_INLINE
void movT3(RegisterID rd
, ARMThumbImmediate imm
)
1262 ASSERT(imm
.isValid());
1263 ASSERT(!imm
.isEncodedImm());
1264 ASSERT(!BadReg(rd
));
1266 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3
, imm
.m_value
.imm4
, rd
, imm
);
1269 #if OS(LINUX) || OS(QNX)
1270 static void revertJumpTo_movT3movtcmpT2(void* instructionStart
, RegisterID left
, RegisterID right
, uintptr_t imm
)
1272 uint16_t* address
= static_cast<uint16_t*>(instructionStart
);
1273 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm
));
1274 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm
>> 16));
1275 address
[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
1276 address
[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right
, lo16
);
1277 address
[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
1278 address
[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right
, hi16
);
1279 address
[4] = OP_CMP_reg_T2
| left
;
1280 cacheFlush(address
, sizeof(uint16_t) * 5);
1283 static void revertJumpTo_movT3(void* instructionStart
, RegisterID rd
, ARMThumbImmediate imm
)
1285 ASSERT(imm
.isValid());
1286 ASSERT(!imm
.isEncodedImm());
1287 ASSERT(!BadReg(rd
));
1289 uint16_t* address
= static_cast<uint16_t*>(instructionStart
);
1290 address
[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, imm
);
1291 address
[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, imm
);
1292 cacheFlush(address
, sizeof(uint16_t) * 2);
1296 ALWAYS_INLINE
void mov(RegisterID rd
, ARMThumbImmediate imm
)
1298 ASSERT(imm
.isValid());
1299 ASSERT(!BadReg(rd
));
1301 if ((rd
< 8) && imm
.isUInt8())
1302 m_formatter
.oneWordOp5Reg3Imm8(OP_MOV_imm_T1
, rd
, imm
.getUInt8());
1303 else if (imm
.isEncodedImm())
1304 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2
, 0xf, rd
, imm
);
1309 ALWAYS_INLINE
void mov(RegisterID rd
, RegisterID rm
)
1311 m_formatter
.oneWordOp8RegReg143(OP_MOV_reg_T1
, rm
, rd
);
1314 ALWAYS_INLINE
void movt(RegisterID rd
, ARMThumbImmediate imm
)
1316 ASSERT(imm
.isUInt16());
1317 ASSERT(!BadReg(rd
));
1318 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT
, imm
.m_value
.imm4
, rd
, imm
);
1321 ALWAYS_INLINE
void mvn(RegisterID rd
, ARMThumbImmediate imm
)
1323 ASSERT(imm
.isEncodedImm());
1324 ASSERT(!BadReg(rd
));
1326 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm
, 0xf, rd
, imm
);
1329 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
, ShiftTypeAndAmount shift
)
1331 ASSERT(!BadReg(rd
));
1332 ASSERT(!BadReg(rm
));
1333 m_formatter
.twoWordOp16FourFours(OP_MVN_reg_T2
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1336 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
)
1338 if (!((rd
| rm
) & 8))
1339 m_formatter
.oneWordOp10Reg3Reg3(OP_MVN_reg_T1
, rm
, rd
);
1341 mvn(rd
, rm
, ShiftTypeAndAmount());
1344 ALWAYS_INLINE
void neg(RegisterID rd
, RegisterID rm
)
1346 ARMThumbImmediate zero
= ARMThumbImmediate::makeUInt12(0);
1350 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1352 ASSERT(!BadReg(rd
));
1353 ASSERT(!BadReg(rn
));
1354 ASSERT(imm
.isEncodedImm());
1355 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1
, rn
, rd
, imm
);
1358 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1360 ASSERT(!BadReg(rd
));
1361 ASSERT(!BadReg(rn
));
1362 ASSERT(!BadReg(rm
));
1363 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1366 void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1368 if ((rd
== rn
) && !((rd
| rm
) & 8))
1369 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1370 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1371 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1373 orr(rd
, rn
, rm
, ShiftTypeAndAmount());
1376 ALWAYS_INLINE
void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1378 ASSERT(!BadReg(rd
));
1379 ASSERT(!BadReg(rn
));
1380 ASSERT(!BadReg(rm
));
1381 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1384 void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1386 if ((rd
== rn
) && !((rd
| rm
) & 8))
1387 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1388 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1389 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1391 orr_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1394 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1396 ASSERT(!BadReg(rd
));
1397 ASSERT(!BadReg(rm
));
1398 ShiftTypeAndAmount
shift(SRType_ROR
, shiftAmount
);
1399 m_formatter
.twoWordOp16FourFours(OP_ROR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1402 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1404 ASSERT(!BadReg(rd
));
1405 ASSERT(!BadReg(rn
));
1406 ASSERT(!BadReg(rm
));
1407 m_formatter
.twoWordOp12Reg4FourFours(OP_ROR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1410 #if CPU(APPLE_ARMV7S)
1411 ALWAYS_INLINE
void sdiv(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1413 ASSERT(!BadReg(rd
));
1414 ASSERT(!BadReg(rn
));
1415 ASSERT(!BadReg(rm
));
1416 m_formatter
.twoWordOp12Reg4FourFours(OP_SDIV_T1
, rn
, FourFours(0xf, rd
, 0xf, rm
));
1420 ALWAYS_INLINE
void smull(RegisterID rdLo
, RegisterID rdHi
, RegisterID rn
, RegisterID rm
)
1422 ASSERT(!BadReg(rdLo
));
1423 ASSERT(!BadReg(rdHi
));
1424 ASSERT(!BadReg(rn
));
1425 ASSERT(!BadReg(rm
));
1426 ASSERT(rdLo
!= rdHi
);
1427 m_formatter
.twoWordOp12Reg4FourFours(OP_SMULL_T1
, rn
, FourFours(rdLo
, rdHi
, 0, rm
));
1430 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1431 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1433 ASSERT(rt
!= ARMRegisters::pc
);
1434 ASSERT(rn
!= ARMRegisters::pc
);
1435 ASSERT(imm
.isUInt12());
1437 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1438 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1439 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1440 m_formatter
.oneWordOp5Reg3Imm8(OP_STR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1442 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3
, rn
, rt
, imm
.getUInt12());
1445 // If index is set, this is a regular offset or a pre-indexed store;
1446 // if index is not set then is is a post-index store.
1448 // If wback is set rn is updated - this is a pre or post index store,
1449 // if wback is not set this is a regular offset memory access.
1451 // (-255 <= offset <= 255)
1453 // _tmp = _reg + offset
1454 // MEM[index ? _tmp : _reg] = REG[rt]
1455 // if (wback) REG[rn] = _tmp
1456 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1458 ASSERT(rt
!= ARMRegisters::pc
);
1459 ASSERT(rn
!= ARMRegisters::pc
);
1460 ASSERT(index
|| wback
);
1461 ASSERT(!wback
| (rt
!= rn
));
1468 ASSERT((offset
& ~0xff) == 0);
1470 offset
|= (wback
<< 8);
1471 offset
|= (add
<< 9);
1472 offset
|= (index
<< 10);
1473 offset
|= (1 << 11);
1475 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4
, rn
, rt
, offset
);
1478 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1479 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1481 ASSERT(rn
!= ARMRegisters::pc
);
1482 ASSERT(!BadReg(rm
));
1485 if (!shift
&& !((rt
| rn
| rm
) & 8))
1486 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1
, rm
, rn
, rt
);
1488 m_formatter
.twoWordOp12Reg4FourFours(OP_STR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1491 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1492 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1494 ASSERT(rt
!= ARMRegisters::pc
);
1495 ASSERT(rn
!= ARMRegisters::pc
);
1496 ASSERT(imm
.isUInt12());
1498 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1499 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1501 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1504 // If index is set, this is a regular offset or a pre-indexed store;
1505 // if index is not set then is is a post-index store.
1507 // If wback is set rn is updated - this is a pre or post index store,
1508 // if wback is not set this is a regular offset memory access.
1510 // (-255 <= offset <= 255)
1512 // _tmp = _reg + offset
1513 // MEM[index ? _tmp : _reg] = REG[rt]
1514 // if (wback) REG[rn] = _tmp
1515 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1517 ASSERT(rt
!= ARMRegisters::pc
);
1518 ASSERT(rn
!= ARMRegisters::pc
);
1519 ASSERT(index
|| wback
);
1520 ASSERT(!wback
| (rt
!= rn
));
1527 ASSERT((offset
& ~0xff) == 0);
1529 offset
|= (wback
<< 8);
1530 offset
|= (add
<< 9);
1531 offset
|= (index
<< 10);
1532 offset
|= (1 << 11);
1534 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3
, rn
, rt
, offset
);
1537 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1538 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1540 ASSERT(rn
!= ARMRegisters::pc
);
1541 ASSERT(!BadReg(rm
));
1544 if (!shift
&& !((rt
| rn
| rm
) & 8))
1545 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1
, rm
, rn
, rt
);
1547 m_formatter
.twoWordOp12Reg4FourFours(OP_STRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1550 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1551 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1553 ASSERT(rt
!= ARMRegisters::pc
);
1554 ASSERT(rn
!= ARMRegisters::pc
);
1555 ASSERT(imm
.isUInt12());
1557 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1558 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1560 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1563 // If index is set, this is a regular offset or a pre-indexed store;
1564 // if index is not set then is is a post-index store.
1566 // If wback is set rn is updated - this is a pre or post index store,
1567 // if wback is not set this is a regular offset memory access.
1569 // (-255 <= offset <= 255)
1571 // _tmp = _reg + offset
1572 // MEM[index ? _tmp : _reg] = REG[rt]
1573 // if (wback) REG[rn] = _tmp
1574 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1576 ASSERT(rt
!= ARMRegisters::pc
);
1577 ASSERT(rn
!= ARMRegisters::pc
);
1578 ASSERT(index
|| wback
);
1579 ASSERT(!wback
| (rt
!= rn
));
1586 ASSERT(!(offset
& ~0xff));
1588 offset
|= (wback
<< 8);
1589 offset
|= (add
<< 9);
1590 offset
|= (index
<< 10);
1591 offset
|= (1 << 11);
1593 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3
, rn
, rt
, offset
);
1596 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1597 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1599 ASSERT(rn
!= ARMRegisters::pc
);
1600 ASSERT(!BadReg(rm
));
1603 if (!shift
&& !((rt
| rn
| rm
) & 8))
1604 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1
, rm
, rn
, rt
);
1606 m_formatter
.twoWordOp12Reg4FourFours(OP_STRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1609 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1611 // Rd can only be SP if Rn is also SP.
1612 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1613 ASSERT(rd
!= ARMRegisters::pc
);
1614 ASSERT(rn
!= ARMRegisters::pc
);
1615 ASSERT(imm
.isValid());
1617 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1618 ASSERT(!(imm
.getUInt16() & 3));
1619 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1621 } else if (!((rd
| rn
) & 8)) {
1622 if (imm
.isUInt3()) {
1623 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1625 } else if ((rd
== rn
) && imm
.isUInt8()) {
1626 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1631 if (imm
.isEncodedImm())
1632 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3
, rn
, rd
, imm
);
1634 ASSERT(imm
.isUInt12());
1635 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4
, rn
, rd
, imm
);
1639 ALWAYS_INLINE
void sub(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1641 ASSERT(rd
!= ARMRegisters::pc
);
1642 ASSERT(rn
!= ARMRegisters::pc
);
1643 ASSERT(imm
.isValid());
1644 ASSERT(imm
.isUInt12());
1646 if (!((rd
| rn
) & 8) && !imm
.getUInt12())
1647 m_formatter
.oneWordOp10Reg3Reg3(OP_RSB_imm_T1
, rn
, rd
);
1649 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2
, rn
, rd
, imm
);
1652 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1654 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1655 ASSERT(rd
!= ARMRegisters::pc
);
1656 ASSERT(rn
!= ARMRegisters::pc
);
1657 ASSERT(!BadReg(rm
));
1658 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1661 // NOTE: In an IT block, add doesn't modify the flags register.
1662 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1664 if (!((rd
| rn
| rm
) & 8))
1665 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1667 sub(rd
, rn
, rm
, ShiftTypeAndAmount());
1670 // Not allowed in an IT (if then) block.
1671 void sub_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1673 // Rd can only be SP if Rn is also SP.
1674 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1675 ASSERT(rd
!= ARMRegisters::pc
);
1676 ASSERT(rn
!= ARMRegisters::pc
);
1677 ASSERT(imm
.isValid());
1679 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1680 ASSERT(!(imm
.getUInt16() & 3));
1681 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1683 } else if (!((rd
| rn
) & 8)) {
1684 if (imm
.isUInt3()) {
1685 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1687 } else if ((rd
== rn
) && imm
.isUInt8()) {
1688 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1693 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3
, rn
, rd
, imm
);
1696 ALWAYS_INLINE
void sub_S(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1698 ASSERT(rd
!= ARMRegisters::pc
);
1699 ASSERT(rn
!= ARMRegisters::pc
);
1700 ASSERT(imm
.isValid());
1701 ASSERT(imm
.isUInt12());
1703 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2
, rn
, rd
, imm
);
1706 // Not allowed in an IT (if then) block?
1707 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1709 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1710 ASSERT(rd
!= ARMRegisters::pc
);
1711 ASSERT(rn
!= ARMRegisters::pc
);
1712 ASSERT(!BadReg(rm
));
1713 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1716 // Not allowed in an IT (if then) block.
1717 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1719 if (!((rd
| rn
| rm
) & 8))
1720 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1722 sub_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1725 ALWAYS_INLINE
void tst(RegisterID rn
, ARMThumbImmediate imm
)
1727 ASSERT(!BadReg(rn
));
1728 ASSERT(imm
.isEncodedImm());
1730 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm
, rn
, (RegisterID
)0xf, imm
);
1733 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1735 ASSERT(!BadReg(rn
));
1736 ASSERT(!BadReg(rm
));
1737 m_formatter
.twoWordOp12Reg4FourFours(OP_TST_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
1740 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
)
1743 tst(rn
, rm
, ShiftTypeAndAmount());
1745 m_formatter
.oneWordOp10Reg3Reg3(OP_TST_reg_T1
, rm
, rn
);
1748 ALWAYS_INLINE
void ubfx(RegisterID rd
, RegisterID rn
, unsigned lsb
, unsigned width
)
1751 ASSERT((width
>= 1) && (width
<= 32));
1752 ASSERT((lsb
+ width
) <= 32);
1753 m_formatter
.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1
, rd
, rn
, (lsb
& 0x1c) << 10, (lsb
& 0x3) << 6, (width
- 1) & 0x1f);
1756 #if CPU(APPLE_ARMV7S)
1757 ALWAYS_INLINE
void udiv(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1759 ASSERT(!BadReg(rd
));
1760 ASSERT(!BadReg(rn
));
1761 ASSERT(!BadReg(rm
));
1762 m_formatter
.twoWordOp12Reg4FourFours(OP_UDIV_T1
, rn
, FourFours(0xf, rd
, 0xf, rm
));
1766 void vadd(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1768 m_formatter
.vfpOp(OP_VADD_T2
, OP_VADD_T2b
, true, rn
, rd
, rm
);
1771 void vcmp(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1773 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(4), rd
, rm
);
1776 void vcmpz(FPDoubleRegisterID rd
)
1778 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(5), rd
, VFPOperand(0));
1781 void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
1783 // boolean values are 64bit (toInt, unsigned, roundZero)
1784 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(false, false, false), rd
, rm
);
1787 void vcvt_floatingPointToSigned(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1789 // boolean values are 64bit (toInt, unsigned, roundZero)
1790 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, false, true), rd
, rm
);
1793 void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1795 // boolean values are 64bit (toInt, unsigned, roundZero)
1796 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, true, true), rd
, rm
);
1799 void vdiv(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1801 m_formatter
.vfpOp(OP_VDIV
, OP_VDIVb
, true, rn
, rd
, rm
);
1804 void vldr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1806 m_formatter
.vfpMemOp(OP_VLDR
, OP_VLDRb
, true, rn
, rd
, imm
);
1809 void flds(FPSingleRegisterID rd
, RegisterID rn
, int32_t imm
)
1811 m_formatter
.vfpMemOp(OP_FLDS
, OP_FLDSb
, false, rn
, rd
, imm
);
1814 void vmov(RegisterID rd
, FPSingleRegisterID rn
)
1816 ASSERT(!BadReg(rd
));
1817 m_formatter
.vfpOp(OP_VMOV_StoC
, OP_VMOV_StoCb
, false, rn
, rd
, VFPOperand(0));
1820 void vmov(FPSingleRegisterID rd
, RegisterID rn
)
1822 ASSERT(!BadReg(rn
));
1823 m_formatter
.vfpOp(OP_VMOV_CtoS
, OP_VMOV_CtoSb
, false, rd
, rn
, VFPOperand(0));
1826 void vmov(RegisterID rd1
, RegisterID rd2
, FPDoubleRegisterID rn
)
1828 ASSERT(!BadReg(rd1
));
1829 ASSERT(!BadReg(rd2
));
1830 m_formatter
.vfpOp(OP_VMOV_DtoC
, OP_VMOV_DtoCb
, true, rd2
, VFPOperand(rd1
| 16), rn
);
1833 void vmov(FPDoubleRegisterID rd
, RegisterID rn1
, RegisterID rn2
)
1835 ASSERT(!BadReg(rn1
));
1836 ASSERT(!BadReg(rn2
));
1837 m_formatter
.vfpOp(OP_VMOV_CtoD
, OP_VMOV_CtoDb
, true, rn2
, VFPOperand(rn1
| 16), rd
);
1840 void vmov(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
)
1842 m_formatter
.vfpOp(OP_VMOV_T2
, OP_VMOV_T2b
, true, VFPOperand(0), rd
, rn
);
1845 void vmrs(RegisterID reg
= ARMRegisters::pc
)
1847 ASSERT(reg
!= ARMRegisters::sp
);
1848 m_formatter
.vfpOp(OP_VMRS
, OP_VMRSb
, false, VFPOperand(1), VFPOperand(0x10 | reg
), VFPOperand(0));
1851 void vmul(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1853 m_formatter
.vfpOp(OP_VMUL_T2
, OP_VMUL_T2b
, true, rn
, rd
, rm
);
1856 void vstr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1858 m_formatter
.vfpMemOp(OP_VSTR
, OP_VSTRb
, true, rn
, rd
, imm
);
1861 void fsts(FPSingleRegisterID rd
, RegisterID rn
, int32_t imm
)
1863 m_formatter
.vfpMemOp(OP_FSTS
, OP_FSTSb
, false, rn
, rd
, imm
);
1866 void vsub(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1868 m_formatter
.vfpOp(OP_VSUB_T2
, OP_VSUB_T2b
, true, rn
, rd
, rm
);
1871 void vabs(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1873 m_formatter
.vfpOp(OP_VABS_T2
, OP_VABS_T2b
, true, VFPOperand(16), rd
, rm
);
1876 void vneg(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1878 m_formatter
.vfpOp(OP_VNEG_T2
, OP_VNEG_T2b
, true, VFPOperand(1), rd
, rm
);
1881 void vsqrt(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1883 m_formatter
.vfpOp(OP_VSQRT_T1
, OP_VSQRT_T1b
, true, VFPOperand(17), rd
, rm
);
1886 void vcvtds(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
1888 m_formatter
.vfpOp(OP_VCVTDS_T1
, OP_VCVTDS_T1b
, false, VFPOperand(23), rd
, rm
);
1891 void vcvtsd(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1893 m_formatter
.vfpOp(OP_VCVTSD_T1
, OP_VCVTSD_T1b
, true, VFPOperand(23), rd
, rm
);
1898 m_formatter
.oneWordOp8Imm8(OP_NOP_T1
, 0);
1903 m_formatter
.twoWordOp16Op16(OP_NOP_T2a
, OP_NOP_T2b
);
1906 AssemblerLabel
labelIgnoringWatchpoints()
1908 return m_formatter
.label();
1911 AssemblerLabel
labelForWatchpoint()
1913 AssemblerLabel result
= m_formatter
.label();
1914 if (static_cast<int>(result
.m_offset
) != m_indexOfLastWatchpoint
)
1916 m_indexOfLastWatchpoint
= result
.m_offset
;
1917 m_indexOfTailOfLastWatchpoint
= result
.m_offset
+ maxJumpReplacementSize();
1921 AssemblerLabel
label()
1923 AssemblerLabel result
= m_formatter
.label();
1924 while (UNLIKELY(static_cast<int>(result
.m_offset
) < m_indexOfTailOfLastWatchpoint
)) {
1925 if (UNLIKELY(static_cast<int>(result
.m_offset
) + 4 <= m_indexOfTailOfLastWatchpoint
))
1929 result
= m_formatter
.label();
1934 AssemblerLabel
align(int alignment
)
1936 while (!m_formatter
.isAligned(alignment
))
1942 static void* getRelocatedAddress(void* code
, AssemblerLabel label
)
1944 ASSERT(label
.isSet());
1945 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + label
.m_offset
);
1948 static int getDifferenceBetweenLabels(AssemblerLabel a
, AssemblerLabel b
)
1950 return b
.m_offset
- a
.m_offset
;
1953 int executableOffsetFor(int location
)
1957 return static_cast<int32_t*>(m_formatter
.data())[location
/ sizeof(int32_t) - 1];
1960 int jumpSizeDelta(JumpType jumpType
, JumpLinkType jumpLinkType
) { return JUMP_ENUM_SIZE(jumpType
) - JUMP_ENUM_SIZE(jumpLinkType
); }
1962 // Assembler admin methods:
1964 static ALWAYS_INLINE
bool linkRecordSourceComparator(const LinkRecord
& a
, const LinkRecord
& b
)
1966 return a
.from() < b
.from();
1969 bool canCompact(JumpType jumpType
)
1971 // The following cannot be compacted:
1972 // JumpFixed: represents custom jump sequence
1973 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
1974 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
1975 return (jumpType
== JumpNoCondition
) || (jumpType
== JumpCondition
);
1978 JumpLinkType
computeJumpType(JumpType jumpType
, const uint8_t* from
, const uint8_t* to
)
1980 if (jumpType
== JumpFixed
)
1983 // for patchable jump we must leave space for the longest code sequence
1984 if (jumpType
== JumpNoConditionFixedSize
)
1986 if (jumpType
== JumpConditionFixedSize
)
1987 return LinkConditionalBX
;
1989 const int paddingSize
= JUMP_ENUM_SIZE(jumpType
);
1991 if (jumpType
== JumpCondition
) {
1992 // 2-byte conditional T1
1993 const uint16_t* jumpT1Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT1
)));
1994 if (canBeJumpT1(jumpT1Location
, to
))
1996 // 4-byte conditional T3
1997 const uint16_t* jumpT3Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT3
)));
1998 if (canBeJumpT3(jumpT3Location
, to
))
2000 // 4-byte conditional T4 with IT
2001 const uint16_t* conditionalJumpT4Location
=
2002 reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkConditionalJumpT4
)));
2003 if (canBeJumpT4(conditionalJumpT4Location
, to
))
2004 return LinkConditionalJumpT4
;
2006 // 2-byte unconditional T2
2007 const uint16_t* jumpT2Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT2
)));
2008 if (canBeJumpT2(jumpT2Location
, to
))
2010 // 4-byte unconditional T4
2011 const uint16_t* jumpT4Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT4
)));
2012 if (canBeJumpT4(jumpT4Location
, to
))
2014 // use long jump sequence
2018 ASSERT(jumpType
== JumpCondition
);
2019 return LinkConditionalBX
;
2022 JumpLinkType
computeJumpType(LinkRecord
& record
, const uint8_t* from
, const uint8_t* to
)
2024 JumpLinkType linkType
= computeJumpType(record
.type(), from
, to
);
2025 record
.setLinkType(linkType
);
2029 void recordLinkOffsets(int32_t regionStart
, int32_t regionEnd
, int32_t offset
)
2031 int32_t ptr
= regionStart
/ sizeof(int32_t);
2032 const int32_t end
= regionEnd
/ sizeof(int32_t);
2033 int32_t* offsets
= static_cast<int32_t*>(m_formatter
.data());
2035 offsets
[ptr
++] = offset
;
2038 Vector
<LinkRecord
, 0, UnsafeVectorOverflow
>& jumpsToLink()
2040 std::sort(m_jumpsToLink
.begin(), m_jumpsToLink
.end(), linkRecordSourceComparator
);
2041 return m_jumpsToLink
;
2044 void ALWAYS_INLINE
link(LinkRecord
& record
, uint8_t* from
, uint8_t* to
)
2046 switch (record
.linkType()) {
2048 linkJumpT1(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2051 linkJumpT2(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2054 linkJumpT3(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2057 linkJumpT4(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2059 case LinkConditionalJumpT4
:
2060 linkConditionalJumpT4(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2062 case LinkConditionalBX
:
2063 linkConditionalBX(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2066 linkBX(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2069 RELEASE_ASSERT_NOT_REACHED();
2074 void* unlinkedCode() { return m_formatter
.data(); }
2075 size_t codeSize() const { return m_formatter
.codeSize(); }
2077 static unsigned getCallReturnOffset(AssemblerLabel call
)
2079 ASSERT(call
.isSet());
2080 return call
.m_offset
;
2083 // Linking & patching:
2085 // 'link' and 'patch' methods are for use on unprotected code - such as the code
2086 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
2087 // code has been finalized it is (platform support permitting) within a non-
2088 // writable region of memory; to modify the code in an execute-only execuable
2089 // pool the 'repatch' and 'relink' methods should be used.
2091 void linkJump(AssemblerLabel from
, AssemblerLabel to
, JumpType type
, Condition condition
)
2094 ASSERT(from
.isSet());
2095 m_jumpsToLink
.append(LinkRecord(from
.m_offset
, to
.m_offset
, type
, condition
));
2098 static void linkJump(void* code
, AssemblerLabel from
, void* to
)
2100 ASSERT(from
.isSet());
2102 uint16_t* location
= reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
);
2103 linkJumpAbsolute(location
, to
);
2106 static void linkCall(void* code
, AssemblerLabel from
, void* to
)
2108 ASSERT(!(reinterpret_cast<intptr_t>(code
) & 1));
2109 ASSERT(from
.isSet());
2110 ASSERT(reinterpret_cast<intptr_t>(to
) & 1);
2112 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
) - 1, to
, false);
2115 static void linkPointer(void* code
, AssemblerLabel where
, void* value
)
2117 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
, false);
2120 static void relinkJump(void* from
, void* to
)
2122 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
2123 ASSERT(!(reinterpret_cast<intptr_t>(to
) & 1));
2125 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from
), to
);
2127 cacheFlush(reinterpret_cast<uint16_t*>(from
) - 5, 5 * sizeof(uint16_t));
2130 static void relinkCall(void* from
, void* to
)
2132 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
2133 ASSERT(reinterpret_cast<intptr_t>(to
) & 1);
2135 setPointer(reinterpret_cast<uint16_t*>(from
) - 1, to
, true);
2138 static void* readCallTarget(void* from
)
2140 return readPointer(reinterpret_cast<uint16_t*>(from
) - 1);
2143 static void repatchInt32(void* where
, int32_t value
)
2145 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
2147 setInt32(where
, value
, true);
2150 static void repatchCompact(void* where
, int32_t offset
)
2152 ASSERT(offset
>= -255 && offset
<= 255);
2160 offset
|= (add
<< 9);
2161 offset
|= (1 << 10);
2162 offset
|= (1 << 11);
2164 uint16_t* location
= reinterpret_cast<uint16_t*>(where
);
2165 location
[1] &= ~((1 << 12) - 1);
2166 location
[1] |= offset
;
2167 cacheFlush(location
, sizeof(uint16_t) * 2);
2170 static void repatchPointer(void* where
, void* value
)
2172 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
2174 setPointer(where
, value
, true);
2177 static void* readPointer(void* where
)
2179 return reinterpret_cast<void*>(readInt32(where
));
2182 static void replaceWithJump(void* instructionStart
, void* to
)
2184 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2185 ASSERT(!(bitwise_cast
<uintptr_t>(to
) & 1));
2187 #if OS(LINUX) || OS(QNX)
2188 if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart
), to
)) {
2189 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 2;
2190 linkJumpT4(ptr
, to
);
2191 cacheFlush(ptr
- 2, sizeof(uint16_t) * 2);
2193 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 5;
2195 cacheFlush(ptr
- 5, sizeof(uint16_t) * 5);
2198 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 2;
2199 linkJumpT4(ptr
, to
);
2200 cacheFlush(ptr
- 2, sizeof(uint16_t) * 2);
2204 static ptrdiff_t maxJumpReplacementSize()
2206 #if OS(LINUX) || OS(QNX)
2213 static void replaceWithLoad(void* instructionStart
)
2215 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2216 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
);
2217 switch (ptr
[0] & 0xFFF0) {
2221 ASSERT(!(ptr
[1] & 0xF000));
2223 ptr
[0] |= OP_LDR_imm_T3
;
2224 ptr
[1] |= (ptr
[1] & 0x0F00) << 4;
2226 cacheFlush(ptr
, sizeof(uint16_t) * 2);
2229 RELEASE_ASSERT_NOT_REACHED();
2233 static void replaceWithAddressComputation(void* instructionStart
)
2235 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2236 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
);
2237 switch (ptr
[0] & 0xFFF0) {
2239 ASSERT(!(ptr
[1] & 0x0F00));
2241 ptr
[0] |= OP_ADD_imm_T3
;
2242 ptr
[1] |= (ptr
[1] & 0xF000) >> 4;
2244 cacheFlush(ptr
, sizeof(uint16_t) * 2);
2249 RELEASE_ASSERT_NOT_REACHED();
2253 unsigned debugOffset() { return m_formatter
.debugOffset(); }
2256 static inline void linuxPageFlush(uintptr_t begin
, uintptr_t end
)
2268 : "r" (begin
), "r" (end
)
2269 : "r0", "r1", "r2");
2273 static void cacheFlush(void* code
, size_t size
)
2276 sys_cache_control(kCacheFunctionPrepareForExecution
, code
, size
);
2278 size_t page
= pageSize();
2279 uintptr_t current
= reinterpret_cast<uintptr_t>(code
);
2280 uintptr_t end
= current
+ size
;
2281 uintptr_t firstPageEnd
= (current
& ~(page
- 1)) + page
;
2283 if (end
<= firstPageEnd
) {
2284 linuxPageFlush(current
, end
);
2288 linuxPageFlush(current
, firstPageEnd
);
2290 for (current
= firstPageEnd
; current
+ page
< end
; current
+= page
)
2291 linuxPageFlush(current
, current
+ page
);
2293 linuxPageFlush(current
, end
);
2295 CacheRangeFlush(code
, size
, CACHE_SYNC_ALL
);
2297 #if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
2298 msync(code
, size
, MS_INVALIDATE_ICACHE
);
2304 #error "The cacheFlush support is missing on this platform."
// VFP operations commonly take one or more 5-bit operands, typically representing a
// floating point register number. This will commonly be encoded in the instruction
// in two parts, with one single bit field, and one 4-bit field. In the case of
// double precision operands the high bit of the register number will be encoded
// separately, and for single precision operands the low bit of the register number
// will be encoded individually (see the FPSingleRegisterID constructor below).
// VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
// field to be encoded together in the instruction (the low 4-bits of a double
// register number, or the high 4-bits of a single register number), and bit 4
// contains the bit value to be encoded individually.
// NOTE(review): the 'class VFPOperand' header, the m_value member declaration
// and the bits1()/bits4() method signatures fall on lines missing from this chunk.
    // Construct from a raw, pre-packed 5-bit value.
    explicit VFPOperand(uint32_t value)
        ASSERT(!(m_value & ~0x1f)); // operand must fit in 5 bits
    // Double-precision register: number is used as-is (initializer not visible here).
    VFPOperand(FPDoubleRegisterID reg)
    // Core register (used when moving between core and VFP registers);
    // initializer not visible here.
    VFPOperand(RegisterID reg)
    // Single-precision register: the low bit is the individually-encoded bit.
    VFPOperand(FPSingleRegisterID reg)
        : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
        // bits1(): the individually-encoded bit (bit 4).
        return m_value >> 4;
        // bits4(): the 4-bit field (bits 0..3).
        return m_value & 0xf;
2354 VFPOperand
vcvtOp(bool toInteger
, bool isUnsigned
, bool isRoundZero
)
2356 // Cannot specify rounding when converting to float.
2357 ASSERT(toInteger
|| !isRoundZero
);
2361 // opc2 indicates both toInteger & isUnsigned.
2362 op
|= isUnsigned
? 0x4 : 0x5;
2363 // 'op' field in instruction is isRoundZero
2367 ASSERT(!isRoundZero
);
2368 // 'op' field in instruction is isUnsigned
2372 return VFPOperand(op
);
2375 static void setInt32(void* code
, uint32_t value
, bool flush
)
2377 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2378 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
2380 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
));
2381 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
>> 16));
2382 location
[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2383 location
[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-3] >> 8) & 0xf, lo16
);
2384 location
[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2385 location
[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-1] >> 8) & 0xf, hi16
);
2388 cacheFlush(location
- 4, 4 * sizeof(uint16_t));
2391 static int32_t readInt32(void* code
)
2393 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2394 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
2396 ARMThumbImmediate lo16
;
2397 ARMThumbImmediate hi16
;
2398 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16
, location
[-4]);
2399 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16
, location
[-3]);
2400 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16
, location
[-2]);
2401 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16
, location
[-1]);
2402 uint32_t result
= hi16
.asUInt16();
2404 result
|= lo16
.asUInt16();
2405 return static_cast<int32_t>(result
);
2408 static void setUInt7ForLoad(void* code
, ARMThumbImmediate imm
)
2410 // Requires us to have planted a LDR_imm_T1
2411 ASSERT(imm
.isValid());
2412 ASSERT(imm
.isUInt7());
2413 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2414 location
[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2415 location
[0] |= (imm
.getUInt7() >> 2) << 6;
2416 cacheFlush(location
, sizeof(uint16_t));
2419 static void setPointer(void* code
, void* value
, bool flush
)
2421 setInt32(code
, reinterpret_cast<uint32_t>(value
), flush
);
2424 static bool isB(void* address
)
2426 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2427 return ((instruction
[0] & 0xf800) == OP_B_T4a
) && ((instruction
[1] & 0xd000) == OP_B_T4b
);
2430 static bool isBX(void* address
)
2432 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2433 return (instruction
[0] & 0xff87) == OP_BX
;
2436 static bool isMOV_imm_T3(void* address
)
2438 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2439 return ((instruction
[0] & 0xFBF0) == OP_MOV_imm_T3
) && ((instruction
[1] & 0x8000) == 0);
2442 static bool isMOVT(void* address
)
2444 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2445 return ((instruction
[0] & 0xFBF0) == OP_MOVT
) && ((instruction
[1] & 0x8000) == 0);
2448 static bool isNOP_T1(void* address
)
2450 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2451 return instruction
[0] == OP_NOP_T1
;
2454 static bool isNOP_T2(void* address
)
2456 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2457 return (instruction
[0] == OP_NOP_T2a
) && (instruction
[1] == OP_NOP_T2b
);
2460 static bool canBeJumpT1(const uint16_t* instruction
, const void* target
)
2462 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2463 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2465 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2466 // It does not appear to be documented in the ARM ARM (big surprise), but
2467 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2468 // less than the actual displacement.
2470 return ((relative
<< 23) >> 23) == relative
;
2473 static bool canBeJumpT2(const uint16_t* instruction
, const void* target
)
2475 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2476 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2478 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2479 // It does not appear to be documented in the ARM ARM (big surprise), but
2480 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2481 // less than the actual displacement.
2483 return ((relative
<< 20) >> 20) == relative
;
2486 static bool canBeJumpT3(const uint16_t* instruction
, const void* target
)
2488 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2489 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2491 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2492 return ((relative
<< 11) >> 11) == relative
;
2495 static bool canBeJumpT4(const uint16_t* instruction
, const void* target
)
2497 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2498 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2500 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2501 return ((relative
<< 7) >> 7) == relative
;
2504 void linkJumpT1(Condition cond
, uint16_t* instruction
, void* target
)
2506 // FIMXE: this should be up in the MacroAssembler layer. :-(
2507 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2508 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2509 ASSERT(canBeJumpT1(instruction
, target
));
2511 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2512 // It does not appear to be documented in the ARM ARM (big surprise), but
2513 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2514 // less than the actual displacement.
2517 // All branch offsets should be an even distance.
2518 ASSERT(!(relative
& 1));
2519 instruction
[-1] = OP_B_T1
| ((cond
& 0xf) << 8) | ((relative
& 0x1fe) >> 1);
2522 static void linkJumpT2(uint16_t* instruction
, void* target
)
2524 // FIMXE: this should be up in the MacroAssembler layer. :-(
2525 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2526 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2527 ASSERT(canBeJumpT2(instruction
, target
));
2529 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2530 // It does not appear to be documented in the ARM ARM (big surprise), but
2531 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2532 // less than the actual displacement.
2535 // All branch offsets should be an even distance.
2536 ASSERT(!(relative
& 1));
2537 instruction
[-1] = OP_B_T2
| ((relative
& 0xffe) >> 1);
2540 void linkJumpT3(Condition cond
, uint16_t* instruction
, void* target
)
2542 // FIMXE: this should be up in the MacroAssembler layer. :-(
2543 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2544 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2545 ASSERT(canBeJumpT3(instruction
, target
));
2547 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2549 // All branch offsets should be an even distance.
2550 ASSERT(!(relative
& 1));
2551 instruction
[-2] = OP_B_T3a
| ((relative
& 0x100000) >> 10) | ((cond
& 0xf) << 6) | ((relative
& 0x3f000) >> 12);
2552 instruction
[-1] = OP_B_T3b
| ((relative
& 0x80000) >> 8) | ((relative
& 0x40000) >> 5) | ((relative
& 0xffe) >> 1);
2555 static void linkJumpT4(uint16_t* instruction
, void* target
)
2557 // FIMXE: this should be up in the MacroAssembler layer. :-(
2558 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2559 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2560 ASSERT(canBeJumpT4(instruction
, target
));
2562 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2563 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2565 relative
^= 0xC00000;
2567 // All branch offsets should be an even distance.
2568 ASSERT(!(relative
& 1));
2569 instruction
[-2] = OP_B_T4a
| ((relative
& 0x1000000) >> 14) | ((relative
& 0x3ff000) >> 12);
2570 instruction
[-1] = OP_B_T4b
| ((relative
& 0x800000) >> 10) | ((relative
& 0x400000) >> 11) | ((relative
& 0xffe) >> 1);
2573 void linkConditionalJumpT4(Condition cond
, uint16_t* instruction
, void* target
)
2575 // FIMXE: this should be up in the MacroAssembler layer. :-(
2576 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2577 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2579 instruction
[-3] = ifThenElse(cond
) | OP_IT
;
2580 linkJumpT4(instruction
, target
);
2583 static void linkBX(uint16_t* instruction
, void* target
)
2585 // FIMXE: this should be up in the MacroAssembler layer. :-(
2586 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2587 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2589 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2590 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2591 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2592 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2593 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2594 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2595 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2596 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2599 void linkConditionalBX(Condition cond
, uint16_t* instruction
, void* target
)
2601 // FIMXE: this should be up in the MacroAssembler layer. :-(
2602 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2603 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2605 linkBX(instruction
, target
);
2606 instruction
[-6] = ifThenElse(cond
, true, true) | OP_IT
;
2609 static void linkJumpAbsolute(uint16_t* instruction
, void* target
)
2611 // FIMXE: this should be up in the MacroAssembler layer. :-(
2612 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2613 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2615 ASSERT((isMOV_imm_T3(instruction
- 5) && isMOVT(instruction
- 3) && isBX(instruction
- 1))
2616 || (isNOP_T1(instruction
- 5) && isNOP_T2(instruction
- 4) && isB(instruction
- 2)));
2618 if (canBeJumpT4(instruction
, target
)) {
2619 // There may be a better way to fix this, but right now put the NOPs first, since in the
2620 // case of an conditional branch this will be coming after an ITTT predicating *three*
2621 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2622 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2623 // actually be the second half of a 2-word op.
2624 instruction
[-5] = OP_NOP_T1
;
2625 instruction
[-4] = OP_NOP_T2a
;
2626 instruction
[-3] = OP_NOP_T2b
;
2627 linkJumpT4(instruction
, target
);
2629 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2630 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2631 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2632 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2633 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2634 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2635 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2636 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2640 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op
, ARMThumbImmediate imm
)
2642 return op
| (imm
.m_value
.i
<< 10) | imm
.m_value
.imm4
;
2645 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate
& result
, uint16_t value
)
2647 result
.m_value
.i
= (value
>> 10) & 1;
2648 result
.m_value
.imm4
= value
& 15;
2651 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd
, ARMThumbImmediate imm
)
2653 return (imm
.m_value
.imm3
<< 12) | (rd
<< 8) | imm
.m_value
.imm8
;
2656 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate
& result
, uint16_t value
)
2658 result
.m_value
.imm3
= (value
>> 12) & 7;
2659 result
.m_value
.imm8
= value
& 255;
2662 class ARMInstructionFormatter
{
2664 ALWAYS_INLINE
void oneWordOp5Reg3Imm8(OpcodeID op
, RegisterID rd
, uint8_t imm
)
2666 m_buffer
.putShort(op
| (rd
<< 8) | imm
);
2669 ALWAYS_INLINE
void oneWordOp5Imm5Reg3Reg3(OpcodeID op
, uint8_t imm
, RegisterID reg1
, RegisterID reg2
)
2671 m_buffer
.putShort(op
| (imm
<< 6) | (reg1
<< 3) | reg2
);
2674 ALWAYS_INLINE
void oneWordOp7Reg3Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
, RegisterID reg3
)
2676 m_buffer
.putShort(op
| (reg1
<< 6) | (reg2
<< 3) | reg3
);
2679 ALWAYS_INLINE
void oneWordOp8Imm8(OpcodeID op
, uint8_t imm
)
2681 m_buffer
.putShort(op
| imm
);
2684 ALWAYS_INLINE
void oneWordOp8RegReg143(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2686 m_buffer
.putShort(op
| ((reg2
& 8) << 4) | (reg1
<< 3) | (reg2
& 7));
2689 ALWAYS_INLINE
void oneWordOp9Imm7(OpcodeID op
, uint8_t imm
)
2691 m_buffer
.putShort(op
| imm
);
2694 ALWAYS_INLINE
void oneWordOp10Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2696 m_buffer
.putShort(op
| (reg1
<< 3) | reg2
);
2699 ALWAYS_INLINE
void twoWordOp12Reg4FourFours(OpcodeID1 op
, RegisterID reg
, FourFours ff
)
2701 m_buffer
.putShort(op
| reg
);
2702 m_buffer
.putShort(ff
.m_u
.value
);
2705 ALWAYS_INLINE
void twoWordOp16FourFours(OpcodeID1 op
, FourFours ff
)
2707 m_buffer
.putShort(op
);
2708 m_buffer
.putShort(ff
.m_u
.value
);
2711 ALWAYS_INLINE
void twoWordOp16Op16(OpcodeID1 op1
, OpcodeID2 op2
)
2713 m_buffer
.putShort(op1
);
2714 m_buffer
.putShort(op2
);
2717 ALWAYS_INLINE
void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op
, int imm4
, RegisterID rd
, ARMThumbImmediate imm
)
2719 ARMThumbImmediate newImm
= imm
;
2720 newImm
.m_value
.imm4
= imm4
;
2722 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op
, newImm
));
2723 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, newImm
));
2726 ALWAYS_INLINE
void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm
)
2728 m_buffer
.putShort(op
| reg1
);
2729 m_buffer
.putShort((reg2
<< 12) | imm
);
2732 ALWAYS_INLINE
void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm1
, uint16_t imm2
, uint16_t imm3
)
2734 m_buffer
.putShort(op
| reg1
);
2735 m_buffer
.putShort((imm1
<< 12) | (reg2
<< 8) | (imm2
<< 6) | imm3
);
2738 // Formats up instructions of the pattern:
2739 // 111111111B11aaaa:bbbb222SA2C2cccc
2740 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2741 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2742 ALWAYS_INLINE
void vfpOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, VFPOperand a
, VFPOperand b
, VFPOperand c
)
2744 ASSERT(!(op1
& 0x004f));
2745 ASSERT(!(op2
& 0xf1af));
2746 m_buffer
.putShort(op1
| b
.bits1() << 6 | a
.bits4());
2747 m_buffer
.putShort(op2
| b
.bits4() << 12 | size
<< 8 | a
.bits1() << 7 | c
.bits1() << 5 | c
.bits4());
2750 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2751 // (i.e. +/-(0..255) 32-bit words)
2752 ALWAYS_INLINE
void vfpMemOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, RegisterID rn
, VFPOperand rd
, int32_t imm
)
2760 uint32_t offset
= imm
;
2761 ASSERT(!(offset
& ~0x3fc));
2764 m_buffer
.putShort(op1
| (up
<< 7) | rd
.bits1() << 6 | rn
);
2765 m_buffer
.putShort(op2
| rd
.bits4() << 12 | size
<< 8 | offset
);
// Administrative methods:

// Number of bytes emitted so far.
size_t codeSize() const { return m_buffer.codeSize(); }
// Label denoting the current (end) position in the buffer.
AssemblerLabel label() const { return m_buffer.label(); }
// True if the current position is aligned to 'alignment' bytes.
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
// Pointer to the start of the emitted code.
void* data() const { return m_buffer.data(); }

// Debugging aid: current write offset into the buffer.
unsigned debugOffset() { return m_buffer.debugOffset(); }
// Backing storage that all of the put*/emit methods above append to.
AssemblerBuffer m_buffer;
// Branches recorded during assembly that still need their targets resolved
// at link time.
Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
// Buffer offsets of the most recent watchpoint label and of its end.
// NOTE(review): presumably used to reserve patchable space after a
// watchpoint — confirm against the label/watchpoint emission code.
int m_indexOfLastWatchpoint;
int m_indexOfTailOfLastWatchpoint;
2788 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2790 #endif // ARMAssembler_h