2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
39 namespace ARMRegisters
{
48 r7
, wr
= r7
, // thumb work register
50 r9
, sb
= r9
, // static base
51 r10
, sl
= r10
, // stack limit
52 r11
, fp
= r11
, // frame pointer
127 } FPDoubleRegisterID
;
164 inline FPSingleRegisterID
asSingle(FPDoubleRegisterID reg
)
167 return (FPSingleRegisterID
)(reg
<< 1);
170 inline FPDoubleRegisterID
asDouble(FPSingleRegisterID reg
)
173 return (FPDoubleRegisterID
)(reg
>> 1);
177 class ARMv7Assembler
;
178 class ARMThumbImmediate
{
179 friend class ARMv7Assembler
;
181 typedef uint8_t ThumbImmediateType
;
182 static const ThumbImmediateType TypeInvalid
= 0;
183 static const ThumbImmediateType TypeEncoded
= 1;
184 static const ThumbImmediateType TypeUInt16
= 2;
194 // If this is an encoded immediate, then it may describe a shift, or a pattern.
196 unsigned shiftValue7
: 7;
197 unsigned shiftAmount
: 5;
200 unsigned immediate
: 8;
201 unsigned pattern
: 4;
203 } ThumbImmediateValue
;
205 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
216 ALWAYS_INLINE
static void countLeadingZerosPartial(uint32_t& value
, int32_t& zeros
, const int N
)
218 if (value
& ~((1 << N
) - 1)) /* check for any of the top N bits (of 2N bits) are set */
219 value
>>= N
; /* if any were set, lose the bottom N */
220 else /* if none of the top N bits are set, */
221 zeros
+= N
; /* then we have identified N leading zeros */
224 static int32_t countLeadingZeros(uint32_t value
)
230 countLeadingZerosPartial(value
, zeros
, 16);
231 countLeadingZerosPartial(value
, zeros
, 8);
232 countLeadingZerosPartial(value
, zeros
, 4);
233 countLeadingZerosPartial(value
, zeros
, 2);
234 countLeadingZerosPartial(value
, zeros
, 1);
239 : m_type(TypeInvalid
)
244 ARMThumbImmediate(ThumbImmediateType type
, ThumbImmediateValue value
)
250 ARMThumbImmediate(ThumbImmediateType type
, uint16_t value
)
253 // Make sure this constructor is only reached with type TypeUInt16;
254 // this extra parameter makes the code a little clearer by making it
255 // explicit at call sites which type is being constructed
256 ASSERT_UNUSED(type
, type
== TypeUInt16
);
258 m_value
.asInt
= value
;
262 static ARMThumbImmediate
makeEncodedImm(uint32_t value
)
264 ThumbImmediateValue encoding
;
267 // okay, these are easy.
269 encoding
.immediate
= value
;
270 encoding
.pattern
= 0;
271 return ARMThumbImmediate(TypeEncoded
, encoding
);
274 int32_t leadingZeros
= countLeadingZeros(value
);
275 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
276 ASSERT(leadingZeros
< 24);
278 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
279 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
280 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
281 int32_t rightShiftAmount
= 24 - leadingZeros
;
282 if (value
== ((value
>> rightShiftAmount
) << rightShiftAmount
)) {
283 // Shift the value down to the low byte position. The assign to
284 // shiftValue7 drops the implicit top bit.
285 encoding
.shiftValue7
= value
>> rightShiftAmount
;
286 // The endoded shift amount is the magnitude of a right rotate.
287 encoding
.shiftAmount
= 8 + leadingZeros
;
288 return ARMThumbImmediate(TypeEncoded
, encoding
);
294 if ((bytes
.byte0
== bytes
.byte1
) && (bytes
.byte0
== bytes
.byte2
) && (bytes
.byte0
== bytes
.byte3
)) {
295 encoding
.immediate
= bytes
.byte0
;
296 encoding
.pattern
= 3;
297 return ARMThumbImmediate(TypeEncoded
, encoding
);
300 if ((bytes
.byte0
== bytes
.byte2
) && !(bytes
.byte1
| bytes
.byte3
)) {
301 encoding
.immediate
= bytes
.byte0
;
302 encoding
.pattern
= 1;
303 return ARMThumbImmediate(TypeEncoded
, encoding
);
306 if ((bytes
.byte1
== bytes
.byte3
) && !(bytes
.byte0
| bytes
.byte2
)) {
307 encoding
.immediate
= bytes
.byte1
;
308 encoding
.pattern
= 2;
309 return ARMThumbImmediate(TypeEncoded
, encoding
);
312 return ARMThumbImmediate();
315 static ARMThumbImmediate
makeUInt12(int32_t value
)
317 return (!(value
& 0xfffff000))
318 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
319 : ARMThumbImmediate();
322 static ARMThumbImmediate
makeUInt12OrEncodedImm(int32_t value
)
324 // If this is not a 12-bit unsigned it, try making an encoded immediate.
325 return (!(value
& 0xfffff000))
326 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
327 : makeEncodedImm(value
);
330 // The 'make' methods, above, return a !isValid() value if the argument
331 // cannot be represented as the requested type. This methods is called
332 // 'get' since the argument can always be represented.
333 static ARMThumbImmediate
makeUInt16(uint16_t value
)
335 return ARMThumbImmediate(TypeUInt16
, value
);
340 return m_type
!= TypeInvalid
;
343 // These methods rely on the format of encoded byte values.
344 bool isUInt3() { return !(m_value
.asInt
& 0xfff8); }
345 bool isUInt4() { return !(m_value
.asInt
& 0xfff0); }
346 bool isUInt5() { return !(m_value
.asInt
& 0xffe0); }
347 bool isUInt6() { return !(m_value
.asInt
& 0xffc0); }
348 bool isUInt7() { return !(m_value
.asInt
& 0xff80); }
349 bool isUInt8() { return !(m_value
.asInt
& 0xff00); }
350 bool isUInt9() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfe00); }
351 bool isUInt10() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfc00); }
352 bool isUInt12() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xf000); }
353 bool isUInt16() { return m_type
== TypeUInt16
; }
354 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value
.asInt
; }
355 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value
.asInt
; }
356 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value
.asInt
; }
357 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value
.asInt
; }
358 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value
.asInt
; }
359 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value
.asInt
; }
360 uint8_t getUInt9() { ASSERT(isUInt9()); return m_value
.asInt
; }
361 uint8_t getUInt10() { ASSERT(isUInt10()); return m_value
.asInt
; }
362 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value
.asInt
; }
363 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value
.asInt
; }
365 bool isEncodedImm() { return m_type
== TypeEncoded
; }
368 ThumbImmediateType m_type
;
369 ThumbImmediateValue m_value
;
374 VFPImmediate(double d
)
384 int sign
= static_cast<int>(u
.i
>> 63);
385 int exponent
= static_cast<int>(u
.i
>> 52) & 0x7ff;
386 uint64_t mantissa
= u
.i
& 0x000fffffffffffffull
;
388 if ((exponent
>= 0x3fc) && (exponent
<= 0x403) && !(mantissa
& 0x0000ffffffffffffull
))
389 m_value
= (sign
<< 7) | ((exponent
& 7) << 4) | (int)(mantissa
>> 48);
394 return m_value
!= -1;
399 return (uint8_t)m_value
;
412 SRType_RRX
= SRType_ROR
415 class ARMv7Assembler
;
416 class ShiftTypeAndAmount
{
417 friend class ARMv7Assembler
;
422 m_u
.type
= (ARMShiftType
)0;
426 ShiftTypeAndAmount(ARMShiftType type
, unsigned amount
)
429 m_u
.amount
= amount
& 31;
432 unsigned lo4() { return m_u
.lo4
; }
433 unsigned hi4() { return m_u
.hi4
; }
448 class ARMv7Assembler
{
452 ASSERT(m_jumpsToLink
.isEmpty());
455 typedef ARMRegisters::RegisterID RegisterID
;
456 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID
;
457 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID
;
458 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID
;
460 // (HS, LO, HI, LS) -> (AE, B, A, BE)
461 // (VS, VC) -> (O, NO)
479 ConditionCS
= ConditionHS
,
480 ConditionCC
= ConditionLO
,
483 enum JumpType
{ JumpFixed
, JumpNoCondition
, JumpCondition
, JumpNoConditionFixedSize
, JumpConditionFixedSize
, JumpTypeCount
};
484 enum JumpLinkType
{ LinkInvalid
, LinkJumpT1
, LinkJumpT2
, LinkJumpT3
,
485 LinkJumpT4
, LinkConditionalJumpT4
, LinkBX
, LinkConditionalBX
, JumpLinkTypeCount
};
486 static const int JumpSizes
[JumpLinkTypeCount
];
487 static const int JumpPaddingSizes
[JumpTypeCount
];
490 LinkRecord(intptr_t from
, intptr_t to
, JumpType type
, Condition condition
)
494 , m_linkType(LinkInvalid
)
495 , m_condition(condition
)
498 intptr_t from() const { return m_from
; }
499 void setFrom(intptr_t from
) { m_from
= from
; }
500 intptr_t to() const { return m_to
; }
501 JumpType
type() const { return m_type
; }
502 JumpLinkType
linkType() const { return m_linkType
; }
503 void setLinkType(JumpLinkType linkType
) { ASSERT(m_linkType
== LinkInvalid
); m_linkType
= linkType
; }
504 Condition
condition() const { return m_condition
; }
506 intptr_t m_from
: 31;
509 JumpLinkType m_linkType
: 4;
510 Condition m_condition
: 16;
514 friend class ARMv7Assembler
;
515 friend class ARMInstructionFormatter
;
516 friend class LinkBuffer
;
524 JmpSrc(int offset
, JumpType type
)
526 , m_condition(0xffff)
529 ASSERT(m_type
== JumpFixed
|| m_type
== JumpNoCondition
|| m_type
== JumpNoConditionFixedSize
);
532 JmpSrc(int offset
, JumpType type
, Condition condition
)
534 , m_condition(condition
)
537 ASSERT(m_type
== JumpFixed
|| m_type
== JumpCondition
|| m_type
== JumpConditionFixedSize
);
541 Condition m_condition
: 16;
542 JumpType m_type
: 16;
547 friend class ARMv7Assembler
;
548 friend class ARMInstructionFormatter
;
549 friend class LinkBuffer
;
557 bool isUsed() const { return m_used
; }
558 void used() { m_used
= true; }
564 ASSERT(m_offset
== offset
);
574 bool BadReg(RegisterID reg
)
576 return (reg
== ARMRegisters::sp
) || (reg
== ARMRegisters::pc
);
579 uint32_t singleRegisterMask(FPSingleRegisterID rdNum
, int highBitsShift
, int lowBitShift
)
581 uint32_t rdMask
= (rdNum
>> 1) << highBitsShift
;
583 rdMask
|= 1 << lowBitShift
;
587 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum
, int highBitShift
, int lowBitsShift
)
589 uint32_t rdMask
= (rdNum
& 0xf) << lowBitsShift
;
591 rdMask
|= 1 << highBitShift
;
596 OP_ADD_reg_T1
= 0x1800,
597 OP_SUB_reg_T1
= 0x1A00,
598 OP_ADD_imm_T1
= 0x1C00,
599 OP_SUB_imm_T1
= 0x1E00,
600 OP_MOV_imm_T1
= 0x2000,
601 OP_CMP_imm_T1
= 0x2800,
602 OP_ADD_imm_T2
= 0x3000,
603 OP_SUB_imm_T2
= 0x3800,
604 OP_AND_reg_T1
= 0x4000,
605 OP_EOR_reg_T1
= 0x4040,
606 OP_TST_reg_T1
= 0x4200,
607 OP_RSB_imm_T1
= 0x4240,
608 OP_CMP_reg_T1
= 0x4280,
609 OP_ORR_reg_T1
= 0x4300,
610 OP_MVN_reg_T1
= 0x43C0,
611 OP_ADD_reg_T2
= 0x4400,
612 OP_MOV_reg_T1
= 0x4600,
615 OP_STR_reg_T1
= 0x5000,
616 OP_LDR_reg_T1
= 0x5800,
617 OP_LDRH_reg_T1
= 0x5A00,
618 OP_LDRB_reg_T1
= 0x5C00,
619 OP_STR_imm_T1
= 0x6000,
620 OP_LDR_imm_T1
= 0x6800,
621 OP_LDRB_imm_T1
= 0x7800,
622 OP_LDRH_imm_T1
= 0x8800,
623 OP_STR_imm_T2
= 0x9000,
624 OP_LDR_imm_T2
= 0x9800,
625 OP_ADD_SP_imm_T1
= 0xA800,
626 OP_ADD_SP_imm_T2
= 0xB000,
627 OP_SUB_SP_imm_T1
= 0xB080,
636 OP_AND_reg_T2
= 0xEA00,
637 OP_TST_reg_T2
= 0xEA10,
638 OP_ORR_reg_T2
= 0xEA40,
639 OP_ORR_S_reg_T2
= 0xEA50,
640 OP_ASR_imm_T1
= 0xEA4F,
641 OP_LSL_imm_T1
= 0xEA4F,
642 OP_LSR_imm_T1
= 0xEA4F,
643 OP_ROR_imm_T1
= 0xEA4F,
644 OP_MVN_reg_T2
= 0xEA6F,
645 OP_EOR_reg_T2
= 0xEA80,
646 OP_ADD_reg_T3
= 0xEB00,
647 OP_ADD_S_reg_T3
= 0xEB10,
648 OP_SUB_reg_T2
= 0xEBA0,
649 OP_SUB_S_reg_T2
= 0xEBB0,
650 OP_CMP_reg_T2
= 0xEBB0,
653 OP_VMOV_StoC
= 0xEE00,
654 OP_VMOV_CtoS
= 0xEE10,
660 OP_VCVT_FPIVFP
= 0xEEB0,
661 OP_VMOV_IMM_T2
= 0xEEB0,
665 OP_AND_imm_T1
= 0xF000,
667 OP_ORR_imm_T1
= 0xF040,
668 OP_MOV_imm_T2
= 0xF040,
670 OP_EOR_imm_T1
= 0xF080,
671 OP_ADD_imm_T3
= 0xF100,
672 OP_ADD_S_imm_T3
= 0xF110,
674 OP_SUB_imm_T3
= 0xF1A0,
675 OP_SUB_S_imm_T3
= 0xF1B0,
676 OP_CMP_imm_T2
= 0xF1B0,
677 OP_RSB_imm_T2
= 0xF1C0,
678 OP_ADD_imm_T4
= 0xF200,
679 OP_MOV_imm_T3
= 0xF240,
680 OP_SUB_imm_T4
= 0xF2A0,
683 OP_LDRB_imm_T3
= 0xF810,
684 OP_LDRB_reg_T2
= 0xF810,
685 OP_LDRH_reg_T2
= 0xF830,
686 OP_LDRH_imm_T3
= 0xF830,
687 OP_STR_imm_T4
= 0xF840,
688 OP_STR_reg_T2
= 0xF840,
689 OP_LDR_imm_T4
= 0xF850,
690 OP_LDR_reg_T2
= 0xF850,
691 OP_LDRB_imm_T2
= 0xF890,
692 OP_LDRH_imm_T2
= 0xF8B0,
693 OP_STR_imm_T3
= 0xF8C0,
694 OP_LDR_imm_T3
= 0xF8D0,
695 OP_LSL_reg_T2
= 0xFA00,
696 OP_LSR_reg_T2
= 0xFA20,
697 OP_ASR_reg_T2
= 0xFA40,
698 OP_ROR_reg_T2
= 0xFA60,
699 OP_SMULL_T1
= 0xFB80,
703 OP_VADD_T2b
= 0x0A00,
706 OP_VMOV_IMM_T2b
= 0x0A00,
707 OP_VMUL_T2b
= 0x0A00,
709 OP_VMOV_CtoSb
= 0x0A10,
710 OP_VMOV_StoCb
= 0x0A10,
713 OP_VCVT_FPIVFPb
= 0x0A40,
714 OP_VSUB_T2b
= 0x0A40,
721 FourFours(unsigned f3
, unsigned f2
, unsigned f1
, unsigned f0
)
740 class ARMInstructionFormatter
;
743 bool ifThenElseConditionBit(Condition condition
, bool isIf
)
745 return isIf
? (condition
& 1) : !(condition
& 1);
747 uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
, bool inst4if
)
749 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
750 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
751 | (ifThenElseConditionBit(condition
, inst4if
) << 1)
753 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
754 return (condition
<< 4) | mask
;
756 uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
)
758 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
759 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
761 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
762 return (condition
<< 4) | mask
;
764 uint8_t ifThenElse(Condition condition
, bool inst2if
)
766 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
768 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
769 return (condition
<< 4) | mask
;
772 uint8_t ifThenElse(Condition condition
)
775 return (condition
<< 4) | mask
;
780 void add(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
782 // Rd can only be SP if Rn is also SP.
783 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
784 ASSERT(rd
!= ARMRegisters::pc
);
785 ASSERT(rn
!= ARMRegisters::pc
);
786 ASSERT(imm
.isValid());
788 if (rn
== ARMRegisters::sp
) {
789 if (!(rd
& 8) && imm
.isUInt10()) {
790 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1
, rd
, imm
.getUInt10() >> 2);
792 } else if ((rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
793 m_formatter
.oneWordOp9Imm7(OP_ADD_SP_imm_T2
, imm
.getUInt9() >> 2);
796 } else if (!((rd
| rn
) & 8)) {
798 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
800 } else if ((rd
== rn
) && imm
.isUInt8()) {
801 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
806 if (imm
.isEncodedImm())
807 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3
, rn
, rd
, imm
);
809 ASSERT(imm
.isUInt12());
810 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4
, rn
, rd
, imm
);
814 void add(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
816 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
817 ASSERT(rd
!= ARMRegisters::pc
);
818 ASSERT(rn
!= ARMRegisters::pc
);
820 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
823 // NOTE: In an IT block, add doesn't modify the flags register.
824 void add(RegisterID rd
, RegisterID rn
, RegisterID rm
)
827 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rm
, rd
);
829 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rn
, rd
);
830 else if (!((rd
| rn
| rm
) & 8))
831 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
833 add(rd
, rn
, rm
, ShiftTypeAndAmount());
836 // Not allowed in an IT (if then) block.
837 void add_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
839 // Rd can only be SP if Rn is also SP.
840 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
841 ASSERT(rd
!= ARMRegisters::pc
);
842 ASSERT(rn
!= ARMRegisters::pc
);
843 ASSERT(imm
.isEncodedImm());
845 if (!((rd
| rn
) & 8)) {
847 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
849 } else if ((rd
== rn
) && imm
.isUInt8()) {
850 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
855 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3
, rn
, rd
, imm
);
858 // Not allowed in an IT (if then) block?
859 void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
861 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
862 ASSERT(rd
!= ARMRegisters::pc
);
863 ASSERT(rn
!= ARMRegisters::pc
);
865 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
868 // Not allowed in an IT (if then) block.
869 void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
871 if (!((rd
| rn
| rm
) & 8))
872 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
874 add_S(rd
, rn
, rm
, ShiftTypeAndAmount());
877 void ARM_and(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
881 ASSERT(imm
.isEncodedImm());
882 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1
, rn
, rd
, imm
);
885 void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
890 m_formatter
.twoWordOp12Reg4FourFours(OP_AND_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
893 void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
)
895 if ((rd
== rn
) && !((rd
| rm
) & 8))
896 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rm
, rd
);
897 else if ((rd
== rm
) && !((rd
| rn
) & 8))
898 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rn
, rd
);
900 ARM_and(rd
, rn
, rm
, ShiftTypeAndAmount());
903 void asr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
907 ShiftTypeAndAmount
shift(SRType_ASR
, shiftAmount
);
908 m_formatter
.twoWordOp16FourFours(OP_ASR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
911 void asr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
916 m_formatter
.twoWordOp12Reg4FourFours(OP_ASR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
919 // Only allowed in IT (if then) block if last instruction.
920 JmpSrc
b(JumpType type
)
922 m_formatter
.twoWordOp16Op16(OP_B_T4a
, OP_B_T4b
);
923 return JmpSrc(m_formatter
.size(), type
);
926 // Only allowed in IT (if then) block if last instruction.
927 JmpSrc
blx(RegisterID rm
, JumpType type
)
929 ASSERT(rm
!= ARMRegisters::pc
);
930 m_formatter
.oneWordOp8RegReg143(OP_BLX
, rm
, (RegisterID
)8);
931 return JmpSrc(m_formatter
.size(), type
);
934 // Only allowed in IT (if then) block if last instruction.
935 JmpSrc
bx(RegisterID rm
, JumpType type
, Condition condition
)
937 m_formatter
.oneWordOp8RegReg143(OP_BX
, rm
, (RegisterID
)0);
938 return JmpSrc(m_formatter
.size(), type
, condition
);
941 JmpSrc
bx(RegisterID rm
, JumpType type
)
943 m_formatter
.oneWordOp8RegReg143(OP_BX
, rm
, (RegisterID
)0);
944 return JmpSrc(m_formatter
.size(), type
);
947 void bkpt(uint8_t imm
=0)
949 m_formatter
.oneWordOp8Imm8(OP_BKPT
, imm
);
952 void cmn(RegisterID rn
, ARMThumbImmediate imm
)
954 ASSERT(rn
!= ARMRegisters::pc
);
955 ASSERT(imm
.isEncodedImm());
957 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm
, rn
, (RegisterID
)0xf, imm
);
960 void cmp(RegisterID rn
, ARMThumbImmediate imm
)
962 ASSERT(rn
!= ARMRegisters::pc
);
963 ASSERT(imm
.isEncodedImm());
965 if (!(rn
& 8) && imm
.isUInt8())
966 m_formatter
.oneWordOp5Reg3Imm8(OP_CMP_imm_T1
, rn
, imm
.getUInt8());
968 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2
, rn
, (RegisterID
)0xf, imm
);
971 void cmp(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
973 ASSERT(rn
!= ARMRegisters::pc
);
975 m_formatter
.twoWordOp12Reg4FourFours(OP_CMP_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
978 void cmp(RegisterID rn
, RegisterID rm
)
981 cmp(rn
, rm
, ShiftTypeAndAmount());
983 m_formatter
.oneWordOp10Reg3Reg3(OP_CMP_reg_T1
, rm
, rn
);
986 // xor is not spelled with an 'e'. :-(
987 void eor(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
991 ASSERT(imm
.isEncodedImm());
992 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1
, rn
, rd
, imm
);
995 // xor is not spelled with an 'e'. :-(
996 void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1000 ASSERT(!BadReg(rm
));
1001 m_formatter
.twoWordOp12Reg4FourFours(OP_EOR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1004 // xor is not spelled with an 'e'. :-(
1005 void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1007 if ((rd
== rn
) && !((rd
| rm
) & 8))
1008 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rm
, rd
);
1009 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1010 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rn
, rd
);
1012 eor(rd
, rn
, rm
, ShiftTypeAndAmount());
1015 void it(Condition cond
)
1017 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
));
1020 void it(Condition cond
, bool inst2if
)
1022 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
));
1025 void it(Condition cond
, bool inst2if
, bool inst3if
)
1027 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
));
1030 void it(Condition cond
, bool inst2if
, bool inst3if
, bool inst4if
)
1032 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
, inst4if
));
1035 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1036 void ldr(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1038 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1039 ASSERT(imm
.isUInt12());
1041 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1042 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1043 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1044 m_formatter
.oneWordOp5Reg3Imm8(OP_LDR_imm_T2
, rt
, imm
.getUInt10() >> 2);
1046 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, imm
.getUInt12());
1049 // If index is set, this is a regular offset or a pre-indexed load;
1050 // if index is not set then it is a post-index load.
1052 // If wback is set rn is updated - this is a pre or post index load,
1053 // if wback is not set this is a regular offset memory access.
1055 // (-255 <= offset <= 255)
1057 // _tmp = _reg + offset
1058 // MEM[index ? _tmp : _reg] = REG[rt]
1059 // if (wback) REG[rn] = _tmp
1060 void ldr(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1062 ASSERT(rt
!= ARMRegisters::pc
);
1063 ASSERT(rn
!= ARMRegisters::pc
);
1064 ASSERT(index
|| wback
);
1065 ASSERT(!wback
| (rt
!= rn
));
1072 ASSERT((offset
& ~0xff) == 0);
1074 offset
|= (wback
<< 8);
1075 offset
|= (add
<< 9);
1076 offset
|= (index
<< 10);
1077 offset
|= (1 << 11);
1079 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4
, rn
, rt
, offset
);
1082 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1083 void ldr(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
=0)
1085 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1086 ASSERT(!BadReg(rm
));
1089 if (!shift
&& !((rt
| rn
| rm
) & 8))
1090 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1
, rm
, rn
, rt
);
1092 m_formatter
.twoWordOp12Reg4FourFours(OP_LDR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1095 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1096 void ldrh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1098 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1099 ASSERT(imm
.isUInt12());
1101 if (!((rt
| rn
) & 8) && imm
.isUInt6())
1102 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1
, imm
.getUInt6() >> 2, rn
, rt
);
1104 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1107 // If index is set, this is a regular offset or a pre-indexed load;
1108 // if index is not set then it is a post-index load.
1110 // If wback is set rn is updated - this is a pre or post index load,
1111 // if wback is not set this is a regular offset memory access.
1113 // (-255 <= offset <= 255)
1115 // _tmp = _reg + offset
1116 // MEM[index ? _tmp : _reg] = REG[rt]
1117 // if (wback) REG[rn] = _tmp
1118 void ldrh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1120 ASSERT(rt
!= ARMRegisters::pc
);
1121 ASSERT(rn
!= ARMRegisters::pc
);
1122 ASSERT(index
|| wback
);
1123 ASSERT(!wback
| (rt
!= rn
));
1130 ASSERT((offset
& ~0xff) == 0);
1132 offset
|= (wback
<< 8);
1133 offset
|= (add
<< 9);
1134 offset
|= (index
<< 10);
1135 offset
|= (1 << 11);
1137 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3
, rn
, rt
, offset
);
1140 void ldrh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
=0)
1142 ASSERT(!BadReg(rt
)); // Memory hint
1143 ASSERT(rn
!= ARMRegisters::pc
); // LDRH (literal)
1144 ASSERT(!BadReg(rm
));
1147 if (!shift
&& !((rt
| rn
| rm
) & 8))
1148 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1
, rm
, rn
, rt
);
1150 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1153 void ldrb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1155 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1156 ASSERT(imm
.isUInt12());
1158 if (!((rt
| rn
) & 8) && imm
.isUInt5())
1159 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1
, imm
.getUInt5(), rn
, rt
);
1161 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1164 void ldrb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1166 ASSERT(rt
!= ARMRegisters::pc
);
1167 ASSERT(rn
!= ARMRegisters::pc
);
1168 ASSERT(index
|| wback
);
1169 ASSERT(!wback
| (rt
!= rn
));
1177 ASSERT(!(offset
& ~0xff));
1179 offset
|= (wback
<< 8);
1180 offset
|= (add
<< 9);
1181 offset
|= (index
<< 10);
1182 offset
|= (1 << 11);
1184 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3
, rn
, rt
, offset
);
1187 void ldrb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1189 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1190 ASSERT(!BadReg(rm
));
1193 if (!shift
&& !((rt
| rn
| rm
) & 8))
1194 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1
, rm
, rn
, rt
);
1196 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1199 void lsl(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1201 ASSERT(!BadReg(rd
));
1202 ASSERT(!BadReg(rm
));
1203 ShiftTypeAndAmount
shift(SRType_LSL
, shiftAmount
);
1204 m_formatter
.twoWordOp16FourFours(OP_LSL_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1207 void lsl(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1209 ASSERT(!BadReg(rd
));
1210 ASSERT(!BadReg(rn
));
1211 ASSERT(!BadReg(rm
));
1212 m_formatter
.twoWordOp12Reg4FourFours(OP_LSL_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1215 void lsr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1217 ASSERT(!BadReg(rd
));
1218 ASSERT(!BadReg(rm
));
1219 ShiftTypeAndAmount
shift(SRType_LSR
, shiftAmount
);
1220 m_formatter
.twoWordOp16FourFours(OP_LSR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1223 void lsr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1225 ASSERT(!BadReg(rd
));
1226 ASSERT(!BadReg(rn
));
1227 ASSERT(!BadReg(rm
));
1228 m_formatter
.twoWordOp12Reg4FourFours(OP_LSR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1231 void movT3(RegisterID rd
, ARMThumbImmediate imm
)
1233 ASSERT(imm
.isValid());
1234 ASSERT(!imm
.isEncodedImm());
1235 ASSERT(!BadReg(rd
));
1237 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3
, imm
.m_value
.imm4
, rd
, imm
);
1240 void mov(RegisterID rd
, ARMThumbImmediate imm
)
1242 ASSERT(imm
.isValid());
1243 ASSERT(!BadReg(rd
));
1245 if ((rd
< 8) && imm
.isUInt8())
1246 m_formatter
.oneWordOp5Reg3Imm8(OP_MOV_imm_T1
, rd
, imm
.getUInt8());
1247 else if (imm
.isEncodedImm())
1248 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2
, 0xf, rd
, imm
);
1253 void mov(RegisterID rd
, RegisterID rm
)
1255 m_formatter
.oneWordOp8RegReg143(OP_MOV_reg_T1
, rm
, rd
);
1258 void movt(RegisterID rd
, ARMThumbImmediate imm
)
1260 ASSERT(imm
.isUInt16());
1261 ASSERT(!BadReg(rd
));
1262 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT
, imm
.m_value
.imm4
, rd
, imm
);
1265 void mvn(RegisterID rd
, ARMThumbImmediate imm
)
1267 ASSERT(imm
.isEncodedImm());
1268 ASSERT(!BadReg(rd
));
1270 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm
, 0xf, rd
, imm
);
1273 void mvn(RegisterID rd
, RegisterID rm
, ShiftTypeAndAmount shift
)
1275 ASSERT(!BadReg(rd
));
1276 ASSERT(!BadReg(rm
));
1277 m_formatter
.twoWordOp16FourFours(OP_MVN_reg_T2
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1280 void mvn(RegisterID rd
, RegisterID rm
)
1282 if (!((rd
| rm
) & 8))
1283 m_formatter
.oneWordOp10Reg3Reg3(OP_MVN_reg_T1
, rm
, rd
);
1285 mvn(rd
, rm
, ShiftTypeAndAmount());
1288 void neg(RegisterID rd
, RegisterID rm
)
1290 ARMThumbImmediate zero
= ARMThumbImmediate::makeUInt12(0);
1294 void orr(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1296 ASSERT(!BadReg(rd
));
1297 ASSERT(!BadReg(rn
));
1298 ASSERT(imm
.isEncodedImm());
1299 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1
, rn
, rd
, imm
);
1302 void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1304 ASSERT(!BadReg(rd
));
1305 ASSERT(!BadReg(rn
));
1306 ASSERT(!BadReg(rm
));
1307 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1310 void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1312 if ((rd
== rn
) && !((rd
| rm
) & 8))
1313 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1314 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1315 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1317 orr(rd
, rn
, rm
, ShiftTypeAndAmount());
1320 void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1322 ASSERT(!BadReg(rd
));
1323 ASSERT(!BadReg(rn
));
1324 ASSERT(!BadReg(rm
));
1325 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1328 void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1330 if ((rd
== rn
) && !((rd
| rm
) & 8))
1331 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1332 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1333 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1335 orr_S(rd
, rn
, rm
, ShiftTypeAndAmount());
    // Rotate rm right by a constant shiftAmount, into rd.
    void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // Rotate rn right by the amount held in rm, into rd.
    void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
    // Signed 32x32 -> 64-bit multiply: {rdHi:rdLo} = rn * rm.
    void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rdLo));
        ASSERT(!BadReg(rdHi));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        ASSERT(rdLo != rdHi); // the two destination halves must be distinct
        m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
    }
    // Store rt to [rn + imm]. Picks the shortest available encoding:
    // 16-bit T1 (low regs, word-aligned 7-bit offset), 16-bit T2 (SP-relative,
    // word-aligned 10-bit offset), otherwise the 32-bit T3 form (12-bit offset).
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
    }
1379 // If index is set, this is a regular offset or a pre-indexed store;
1380 // if index is not set then is is a post-index store.
1382 // If wback is set rn is updated - this is a pre or post index store,
1383 // if wback is not set this is a regular offset memory access.
1385 // (-255 <= offset <= 255)
1387 // _tmp = _reg + offset
1388 // MEM[index ? _tmp : _reg] = REG[rt]
1389 // if (wback) REG[rn] = _tmp
1390 void str(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1392 ASSERT(rt
!= ARMRegisters::pc
);
1393 ASSERT(rn
!= ARMRegisters::pc
);
1394 ASSERT(index
|| wback
);
1395 ASSERT(!wback
| (rt
!= rn
));
1402 ASSERT((offset
& ~0xff) == 0);
1404 offset
|= (wback
<< 8);
1405 offset
|= (add
<< 9);
1406 offset
|= (index
<< 10);
1407 offset
|= (1 << 11);
1409 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4
, rn
, rt
, offset
);
    // Store rt to [rn + (rm << shift)].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3); // the T2 encoding only carries a 2-bit shift amount

        // Unshifted low-register stores fit the 16-bit T1 encoding.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
    // rd = rn - imm. Tries the 16-bit SP-adjust, 3-bit-immediate and
    // 8-bit-immediate encodings before falling back to the 32-bit forms.
    void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            // SUB SP, SP, #imm (word-aligned 9-bit immediate).
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        // 32-bit forms: T3 takes an encoded "modified immediate", T4 a plain 12-bit one.
        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
        }
    }
    // Reverse subtract: rd = imm - rn (emitted as RSB).
    void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        // RSB with a zero immediate (negate) has a 16-bit low-register encoding.
        if (!((rd | rn) & 8) && !imm.getUInt12())
            m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
    }
    // rd = rn - (rm shifted).
    void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // NOTE: In an IT block, sub doesn't modify the flags register.
    // (The original comment said "add" - apparently a copy/paste slip, since
    // this is the sub emitter.)
    void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        // All-low-register subtract has a 16-bit encoding.
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
        else
            sub(rd, rn, rm, ShiftTypeAndAmount());
    }
    // Flag-setting subtract of an immediate (SUBS).
    // Not allowed in an IT (if then) block.
    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
            return;
        } else if (!((rd | rn) & 8)) {
            // 16-bit encodings set flags outside an IT block, so they serve here too.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
    }

    // Flag-setting subtract with a shifted register operand.
    // Not allowed in an IT (if then) block?
    void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // Flag-setting register subtract.
    // Not allowed in an IT (if then) block.
    void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
        else
            sub_S(rd, rn, rm, ShiftTypeAndAmount());
    }
    // Test rn against an encoded immediate (ANDS that discards its result;
    // only the flags are updated).
    void tst(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());

        // The Rd field is 0xf for TST - there is no destination register.
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
    }

    // Test rn against a shifted rm.
    void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }

    // Register TST; uses the 16-bit encoding when both registers are low.
    void tst(RegisterID rn, RegisterID rm)
    {
        if ((rn | rm) & 8)
            tst(rn, rm, ShiftTypeAndAmount());
        else
            m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
    }
    // rd = rn + rm (double precision).
    void vadd_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
    }

    // Compare rd with rm; result lands in the FPSCR flags (read back via vmrs).
    void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
    }

    // Compare rd with zero.
    void vcmpz_F64(FPDoubleRegisterID rd)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
    }

    // Convert a signed 32-bit integer (held in a single-precision register) to double.
    void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
    }

    // Convert a double to a signed 32-bit integer.
    // NOTE(review): passes isRoundZero=true despite the 'r' (round-to-current-mode)
    // name - confirm against the intended rounding behaviour.
    void vcvtr_S32_F64(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
    }

    // rd = rn / rm (double precision).
    void vdiv_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
    }

    // Load a double from [rn + imm].
    void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
    }

    // Move a single-precision register to a core register.
    void vmov(RegisterID rd, FPSingleRegisterID rn)
    {
        ASSERT(!BadReg(rd));
        m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rn, rd, VFPOperand(0));
    }

    // Move a core register to a single-precision register.
    void vmov(FPSingleRegisterID rd, RegisterID rn)
    {
        ASSERT(!BadReg(rn));
        m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rd, rn, VFPOperand(0));
    }

    // Read FPSCR; reg == pc (the default) transfers the FP condition flags to APSR.
    void vmrs(RegisterID reg = ARMRegisters::pc)
    {
        ASSERT(reg != ARMRegisters::sp);
        m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
    }

    // rd = rn * rm (double precision).
    void vmul_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
    }

    // Store a double to [rn + imm].
    void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
    }

    // rd = rn - rm (double precision).
    void vsub_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
    }
1624 return JmpDst(m_formatter
.size());
    // Pad the buffer with breakpoints until it reaches the requested alignment,
    // then return a label at the aligned position.
    // (The loop body and return were lost in extraction; reconstructed - verify.)
    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            bkpt();

        return label();
    }
    // Translate a jump source recorded during assembly into its address within
    // the relocated (finalized) copy of the code.
    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        ASSERT(jump.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }

    // Same, for a label (jump destination).
    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }

    // Byte distance between two recorded positions in the buffer.
    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
    // Look up the compaction offset (recorded by recordLinkOffsets into the
    // instruction buffer itself) for a given buffer location.
    // (The zero-location early-out was lost in extraction; reconstructed - verify.)
    int executableOffsetFor(int location)
    {
        if (!location)
            return 0;
        return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
    }

    // Bytes saved by emitting jumpLinkType instead of the full padding reserved
    // for jumpType.
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JumpPaddingSizes[jumpType] - JumpSizes[jumpLinkType]; }
1673 // Assembler admin methods:
1677 return m_formatter
.size();
    // Orders link records by source offset so the compaction pass can walk
    // them front to back.
    static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }

    // Whether a jump of this type may be shrunk during branch compaction.
    bool canCompact(JumpType jumpType)
    {
        // The following cannot be compacted:
        //   JumpFixed: represents custom jump sequence
        //   JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
        //   JumpConditionFixedSize: represents conditional jump that must remain a fixed size
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
    }
    // Choose the shortest branch encoding that can span from 'from' to 'to',
    // honouring fixed-size jump types and avoiding the Cortex-A8 branch
    // erratum. (Dropped return statements reconstructed - verify.)
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        if (jumpType == JumpFixed)
            return LinkInvalid;

        // for patchable jump we must leave space for the longest code sequence
        if (jumpType == JumpNoConditionFixedSize)
            return LinkBX;
        if (jumpType == JumpConditionFixedSize)
            return LinkConditionalBX;

        const int paddingSize = JumpPaddingSizes[jumpType];
        bool mayTriggerErrata = false;

        if (jumpType == JumpCondition) {
            // 2-byte conditional T1
            const uint16_t* jumpT1Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT1]));
            if (canBeJumpT1(jumpT1Location, to))
                return LinkJumpT1;
            // 4-byte conditional T3
            const uint16_t* jumpT3Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT3]));
            if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkJumpT3;
            }
            // 4-byte conditional T4 with IT
            const uint16_t* conditionalJumpT4Location =
                reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkConditionalJumpT4]));
            if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkConditionalJumpT4;
            }
        } else {
            // 2-byte unconditional T2
            const uint16_t* jumpT2Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT2]));
            if (canBeJumpT2(jumpT2Location, to))
                return LinkJumpT2;
            // 4-byte unconditional T4
            const uint16_t* jumpT4Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT4]));
            if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkJumpT4;
            }
            // use long jump sequence
            return LinkBX;
        }

        ASSERT(jumpType == JumpCondition);
        return LinkConditionalBX;
    }

    // Convenience overload: compute, record on the link record, and return.
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }
    // Fill the per-word offset table (kept in the instruction buffer itself)
    // with 'offset' for every word in [regionStart, regionEnd).
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
    {
        int32_t ptr = regionStart / sizeof(int32_t);
        const int32_t end = regionEnd / sizeof(int32_t);
        int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
        while (ptr < end)
            offsets[ptr++] = offset;
    }

    // Pending jump records, sorted by source offset for the link pass.
    Vector<LinkRecord>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
    // Emit the chosen encoding for a compacted jump record.
    // (Case labels and breaks were lost in extraction; reconstructed to match
    // the call sequence visible in the dump.)
    void link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpT1:
            linkJumpT1(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT2:
            linkJumpT2(reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT3:
            linkJumpT3(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT4:
            linkJumpT4(reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkConditionalJumpT4:
            linkConditionalJumpT4(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkConditionalBX:
            linkConditionalBX(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkBX:
            linkBX(reinterpret_cast<uint16_t*>(from), to);
            break;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }

    // Raw, not-yet-linked buffer contents.
    void* unlinkedCode() { return m_formatter.data(); }

    // Offset of the instruction following a recorded call.
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.

    // Record an intra-buffer jump to be resolved later by the link pass.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(to.m_offset != -1);
        ASSERT(from.m_offset != -1);
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, from.m_type, from.m_condition));
    }

    // Point an already-emitted jump at an absolute target address.
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
        linkJumpAbsolute(location, to);
    }

    // bah, this method should really be static, since it is used by the LinkBuffer.
    // return a bool saying whether the link was successful?
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
        ASSERT(from.m_offset != -1);
        ASSERT(reinterpret_cast<intptr_t>(to) & 1); // Thumb call targets carry bit 0 set.

        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
    }

    // Write a pointer constant into the MOV/MOVT pair at a recorded label.
    static void linkPointer(void* code, JmpDst where, void* value)
    {
        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }
    // Retarget a finalized jump; flushes the instruction cache over the
    // rewritten 5-halfword sequence.
    static void relinkJump(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));

        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);

        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
    }

    // Retarget a finalized call; the target must have the Thumb bit set.
    static void relinkCall(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(reinterpret_cast<intptr_t>(to) & 1);

        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
    }

    // Rewrite the 32-bit constant materialized before 'where'.
    static void repatchInt32(void* where, int32_t value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setInt32(where, value);
    }

    // Rewrite the pointer constant materialized before 'where'.
    static void repatchPointer(void* where, void* value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setPointer(where, value);
    }
    // Rewrite a register-offset load (LDR rt, [rn, rm]) in place into the
    // corresponding address computation (ADD rt, rn, rm), keeping the operands.
    static void repatchLoadPtrToLEA(void* where)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
        uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;

        // Sanity-check that we really are looking at an unshifted LDR (reg) T2.
        ASSERT((loadOp[0] & 0xfff0) == OP_LDR_reg_T2);
        ASSERT((loadOp[1] & 0x0ff0) == 0);
        int rn = loadOp[0] & 0xf;
        int rt = loadOp[1] >> 12;
        int rm = loadOp[1] & 0xf;

        loadOp[0] = OP_ADD_reg_T3 | rn;
        loadOp[1] = rt << 8 | rm;
        ExecutableAllocator::cacheFlush(loadOp, sizeof(uint32_t));
    }
    // VFP operations commonly take one or more 5-bit operands, typically representing a
    // floating point register number. This will commonly be encoded in the instruction
    // in two parts, with one single bit field, and one 4-bit field. In the case of
    // double precision operands the high bit of the register number will be encoded
    // separately, and for single precision operands the high bit of the register number
    // will be encoded individually.
    // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
    // field to be encoded together in the instruction (the low 4-bits of a double
    // register number, or the high 4-bits of a single register number), and bit 4
    // contains the bit value to be encoded individually.
    // (The struct shell was lost in extraction; reconstructed - verify.)
    struct VFPOperand {
        explicit VFPOperand(uint32_t value)
            : m_value(value)
        {
            ASSERT(!(m_value & ~0x1f));
        }

        VFPOperand(FPDoubleRegisterID reg)
            : m_value(reg)
        {
        }

        VFPOperand(RegisterID reg)
            : m_value(reg)
        {
        }

        VFPOperand(FPSingleRegisterID reg)
            : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
        {
        }

        // The individually-encoded high bit.
        uint32_t bits1()
        {
            return m_value >> 4;
        }

        // The 4-bit field encoded together in the instruction.
        uint32_t bits4()
        {
            return m_value & 0xf;
        }

        uint32_t m_value; // 5-bit operand value
    };
    // Build the VCVT opcode operand selecting direction (to/from integer),
    // signedness and rounding mode.
    // (The flag-combination branches were partially lost in extraction;
    // reconstructed - verify.)
    VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
    {
        // Cannot specify rounding when converting to float.
        ASSERT(toInteger || !isRoundZero);

        uint32_t op = 0x8;
        if (toInteger) {
            // opc2 indicates both toInteger & isUnsigned.
            op |= isUnsigned ? 0x4 : 0x5;
            // 'op' field in instruction is isRoundZero
            if (isRoundZero)
                op |= 0x10;
        } else {
            // 'op' field in instruction is isUnsigned
            if (!isUnsigned)
                op |= 0x10;
        }
        return VFPOperand(op);
    }
    // Patch the 32-bit constant materialized by the MOV/MOVT pair ending at
    // 'code', then flush the instruction cache over the four halfwords.
    static void setInt32(void* code, uint32_t value)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        // The destination register already encoded in each second halfword is preserved.
        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);

        ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
    }

    // Pointer-typed convenience wrapper over setInt32.
    static void setPointer(void* code, void* value)
    {
        setInt32(code, reinterpret_cast<uint32_t>(value));
    }
    // Instruction-recognition predicates used to sanity-check patch sites.

    // 32-bit unconditional branch (B T4)?
    static bool isB(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
    }

    // BX (branch to register)?
    static bool isBX(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] & 0xff87) == OP_BX;
    }

    // MOVW (MOV immediate, T3 encoding)?
    static bool isMOV_imm_T3(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
    }

    // MOVT (move top halfword)?
    static bool isMOVT(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
    }

    // 16-bit NOP?
    static bool isNOP_T1(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return instruction[0] == OP_NOP_T1;
    }

    // 32-bit NOP?
    static bool isNOP_T2(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
    }
    // Range checks for each branch encoding. 'instruction' is the address just
    // past the branch; displacements are sign-extended from the encoding's width.

    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // Must fit in a signed 9-bit displacement.
        return ((relative << 23) >> 23) == relative;
    }

    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // Must fit in a signed 12-bit displacement.
        return ((relative << 20) >> 20) == relative;
    }

    static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // From Cortex-A8 errata:
        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
        // the target of the branch falls within the first region it is
        // possible for the processor to incorrectly determine the branch
        // instruction, and it is also possible in some cases for the processor
        // to enter a deadlock state.
        // The instruction is spanning two pages if it ends at an address ending 0x002
        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
        mayTriggerErrata = spansTwo4K;
        // The target is in the first page if the jump branch back by [3..0x1002] bytes
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
        return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
    }

    static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // From Cortex-A8 errata:
        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
        // the target of the branch falls within the first region it is
        // possible for the processor to incorrectly determine the branch
        // instruction, and it is also possible in some cases for the processor
        // to enter a deadlock state.
        // The instruction is spanning two pages if it ends at an address ending 0x002
        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
        mayTriggerErrata = spansTwo4K;
        // The target is in the first page if the jump branch back by [3..0x1002] bytes
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
        return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
    }
    // Writers for each branch encoding; each patches the halfword(s)
    // immediately preceding 'instruction'.

    void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT1(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
    }

    static void linkJumpT2(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT2(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
    }

    void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        bool scratch; // receives the (ignored here) errata flag
        UNUSED_PARAM(scratch);
        ASSERT(canBeJumpT3(instruction, target, scratch));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
        instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
    }

    static void linkJumpT4(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        bool scratch; // receives the (ignored here) errata flag
        UNUSED_PARAM(scratch);
        ASSERT(canBeJumpT4(instruction, target, scratch));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // ARM encoding for the top two bits below the sign bit is 'peculiar'.
        // (The guarding condition was lost in extraction; reconstructed - verify.)
        if (relative >= 0)
            relative ^= 0xC00000;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
    }

    // Conditional form of the T4 jump: prefix it with an IT instruction.
    void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        instruction[-3] = ifThenElse(cond) | OP_IT;
        linkJumpT4(instruction, target);
    }
    // Long-range jump: materialize the target (with Thumb bit) into ip via
    // MOVW/MOVT, then BX ip. Occupies five halfwords before 'instruction'.
    static void linkBX(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
    }

    // Conditional long-range jump: the BX sequence predicated by an IT
    // covering all three of its instructions.
    void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        linkBX(instruction, target);
        instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
    }
    // Point the 5-halfword patchable jump sequence ending at 'instruction' at an
    // absolute target: a short T4 branch padded with NOPs when it is in range,
    // otherwise the full MOVW/MOVT/BX sequence.
    static void linkJumpAbsolute(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        // The site must currently hold one of the two sequences this writes.
        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
            || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));

        bool scratch;
        if (canBeJumpT4(instruction, target, scratch)) {
            // There may be a better way to fix this, but right now put the NOPs first, since in the
            // case of an conditional branch this will be coming after an ITTT predicating *three*
            // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
            // variable width encoding - the previous instruction might *look* like an ITTT but
            // actually be the second half of a 2-word op.
            instruction[-5] = OP_NOP_T1;
            instruction[-4] = OP_NOP_T2a;
            instruction[-3] = OP_NOP_T2b;
            linkJumpT4(instruction, target);
        } else {
            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
            ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
            ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
            instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
            instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
            instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
            instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
            instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
        }
    }
    // First halfword of an encoded-immediate 32-bit op: opcode | i-bit | imm4.
    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
    {
        return op | (imm.m_value.i << 10) | imm.m_value.imm4;
    }

    // Second halfword: imm3 | destination register | imm8.
    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
    {
        return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
    }
2233 class ARMInstructionFormatter
{
2235 void oneWordOp5Reg3Imm8(OpcodeID op
, RegisterID rd
, uint8_t imm
)
2237 m_buffer
.putShort(op
| (rd
<< 8) | imm
);
2240 void oneWordOp5Imm5Reg3Reg3(OpcodeID op
, uint8_t imm
, RegisterID reg1
, RegisterID reg2
)
2242 m_buffer
.putShort(op
| (imm
<< 6) | (reg1
<< 3) | reg2
);
2245 void oneWordOp7Reg3Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
, RegisterID reg3
)
2247 m_buffer
.putShort(op
| (reg1
<< 6) | (reg2
<< 3) | reg3
);
2250 void oneWordOp8Imm8(OpcodeID op
, uint8_t imm
)
2252 m_buffer
.putShort(op
| imm
);
2255 void oneWordOp8RegReg143(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2257 m_buffer
.putShort(op
| ((reg2
& 8) << 4) | (reg1
<< 3) | (reg2
& 7));
2259 void oneWordOp9Imm7(OpcodeID op
, uint8_t imm
)
2261 m_buffer
.putShort(op
| imm
);
2264 void oneWordOp10Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2266 m_buffer
.putShort(op
| (reg1
<< 3) | reg2
);
2269 void twoWordOp12Reg4FourFours(OpcodeID1 op
, RegisterID reg
, FourFours ff
)
2271 m_buffer
.putShort(op
| reg
);
2272 m_buffer
.putShort(ff
.m_u
.value
);
2275 void twoWordOp16FourFours(OpcodeID1 op
, FourFours ff
)
2277 m_buffer
.putShort(op
);
2278 m_buffer
.putShort(ff
.m_u
.value
);
2281 void twoWordOp16Op16(OpcodeID1 op1
, OpcodeID2 op2
)
2283 m_buffer
.putShort(op1
);
2284 m_buffer
.putShort(op2
);
2287 void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op
, int imm4
, RegisterID rd
, ARMThumbImmediate imm
)
2289 ARMThumbImmediate newImm
= imm
;
2290 newImm
.m_value
.imm4
= imm4
;
2292 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op
, newImm
));
2293 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, newImm
));
2296 void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm
)
2298 m_buffer
.putShort(op
| reg1
);
2299 m_buffer
.putShort((reg2
<< 12) | imm
);
2302 // Formats up instructions of the pattern:
2303 // 111111111B11aaaa:bbbb222SA2C2cccc
2304 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2305 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2306 void vfpOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, VFPOperand a
, VFPOperand b
, VFPOperand c
)
2308 ASSERT(!(op1
& 0x004f));
2309 ASSERT(!(op2
& 0xf1af));
2310 m_buffer
.putShort(op1
| b
.bits1() << 6 | a
.bits4());
2311 m_buffer
.putShort(op2
| b
.bits4() << 12 | size
<< 8 | a
.bits1() << 7 | c
.bits1() << 5 | c
.bits4());
2314 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2315 // (i.e. +/-(0..255) 32-bit words)
2316 void vfpMemOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, RegisterID rn
, VFPOperand rd
, int32_t imm
)
2324 uint32_t offset
= imm
;
2325 ASSERT(!(offset
& ~0x3fc));
2328 m_buffer
.putShort(op1
| (up
<< 7) | rd
.bits1() << 6 | rn
);
2329 m_buffer
.putShort(op2
| rd
.bits4() << 12 | size
<< 8 | offset
);
// Administrative methods:

// Number of bytes of code emitted into the buffer so far.
size_t size() const { return m_buffer.size(); }
// True if the current emission position is aligned to 'alignment' bytes.
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
// Pointer to the in-progress (non-executable) code buffer.
void* data() const { return m_buffer.data(); }
// Copy the generated code into executable memory obtained from 'allocator'.
void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
// Backing storage the formatter emits 16-bit instruction halfwords into.
AssemblerBuffer m_buffer;
// Jump instructions recorded during assembly, to be linked up later.
Vector<LinkRecord> m_jumpsToLink;
// Per-jump 32-bit offsets kept alongside m_jumpsToLink.
// NOTE(review): their exact meaning is not visible in this chunk — confirm
// against the linking code elsewhere in the file.
Vector<int32_t> m_offsets;
2349 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2351 #endif // ARMAssembler_h