2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
39 namespace ARMRegisters
{
48 r7
, wr
= r7
, // thumb work register
50 r9
, sb
= r9
, // static base
51 r10
, sl
= r10
, // stack limit
52 r11
, fp
= r11
, // frame pointer
127 } FPDoubleRegisterID
;
164 inline FPSingleRegisterID
asSingle(FPDoubleRegisterID reg
)
167 return (FPSingleRegisterID
)(reg
<< 1);
170 inline FPDoubleRegisterID
asDouble(FPSingleRegisterID reg
)
173 return (FPDoubleRegisterID
)(reg
>> 1);
177 class ARMv7Assembler
;
178 class ARMThumbImmediate
{
179 friend class ARMv7Assembler
;
181 typedef uint8_t ThumbImmediateType
;
182 static const ThumbImmediateType TypeInvalid
= 0;
183 static const ThumbImmediateType TypeEncoded
= 1;
184 static const ThumbImmediateType TypeUInt16
= 2;
194 // If this is an encoded immediate, then it may describe a shift, or a pattern.
196 unsigned shiftValue7
: 7;
197 unsigned shiftAmount
: 5;
200 unsigned immediate
: 8;
201 unsigned pattern
: 4;
203 } ThumbImmediateValue
;
205 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
216 ALWAYS_INLINE
static void countLeadingZerosPartial(uint32_t& value
, int32_t& zeros
, const int N
)
218 if (value
& ~((1 << N
) - 1)) /* check for any of the top N bits (of 2N bits) are set */
219 value
>>= N
; /* if any were set, lose the bottom N */
220 else /* if none of the top N bits are set, */
221 zeros
+= N
; /* then we have identified N leading zeros */
224 static int32_t countLeadingZeros(uint32_t value
)
230 countLeadingZerosPartial(value
, zeros
, 16);
231 countLeadingZerosPartial(value
, zeros
, 8);
232 countLeadingZerosPartial(value
, zeros
, 4);
233 countLeadingZerosPartial(value
, zeros
, 2);
234 countLeadingZerosPartial(value
, zeros
, 1);
239 : m_type(TypeInvalid
)
244 ARMThumbImmediate(ThumbImmediateType type
, ThumbImmediateValue value
)
250 ARMThumbImmediate(ThumbImmediateType type
, uint16_t value
)
253 // Make sure this constructor is only reached with type TypeUInt16;
254 // this extra parameter makes the code a little clearer by making it
255 // explicit at call sites which type is being constructed
256 ASSERT_UNUSED(type
, type
== TypeUInt16
);
258 m_value
.asInt
= value
;
262 static ARMThumbImmediate
makeEncodedImm(uint32_t value
)
264 ThumbImmediateValue encoding
;
267 // okay, these are easy.
269 encoding
.immediate
= value
;
270 encoding
.pattern
= 0;
271 return ARMThumbImmediate(TypeEncoded
, encoding
);
274 int32_t leadingZeros
= countLeadingZeros(value
);
275 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
276 ASSERT(leadingZeros
< 24);
278 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
279 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
280 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
281 int32_t rightShiftAmount
= 24 - leadingZeros
;
282 if (value
== ((value
>> rightShiftAmount
) << rightShiftAmount
)) {
283 // Shift the value down to the low byte position. The assign to
284 // shiftValue7 drops the implicit top bit.
285 encoding
.shiftValue7
= value
>> rightShiftAmount
;
286 // The encoded shift amount is the magnitude of a right rotate.
287 encoding
.shiftAmount
= 8 + leadingZeros
;
288 return ARMThumbImmediate(TypeEncoded
, encoding
);
294 if ((bytes
.byte0
== bytes
.byte1
) && (bytes
.byte0
== bytes
.byte2
) && (bytes
.byte0
== bytes
.byte3
)) {
295 encoding
.immediate
= bytes
.byte0
;
296 encoding
.pattern
= 3;
297 return ARMThumbImmediate(TypeEncoded
, encoding
);
300 if ((bytes
.byte0
== bytes
.byte2
) && !(bytes
.byte1
| bytes
.byte3
)) {
301 encoding
.immediate
= bytes
.byte0
;
302 encoding
.pattern
= 1;
303 return ARMThumbImmediate(TypeEncoded
, encoding
);
306 if ((bytes
.byte1
== bytes
.byte3
) && !(bytes
.byte0
| bytes
.byte2
)) {
307 encoding
.immediate
= bytes
.byte1
;
308 encoding
.pattern
= 2;
309 return ARMThumbImmediate(TypeEncoded
, encoding
);
312 return ARMThumbImmediate();
315 static ARMThumbImmediate
makeUInt12(int32_t value
)
317 return (!(value
& 0xfffff000))
318 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
319 : ARMThumbImmediate();
322 static ARMThumbImmediate
makeUInt12OrEncodedImm(int32_t value
)
324 // If this is not a 12-bit unsigned it, try making an encoded immediate.
325 return (!(value
& 0xfffff000))
326 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
327 : makeEncodedImm(value
);
330 // The 'make' methods, above, return a !isValid() value if the argument
331 // cannot be represented as the requested type. This methods is called
332 // 'get' since the argument can always be represented.
333 static ARMThumbImmediate
makeUInt16(uint16_t value
)
335 return ARMThumbImmediate(TypeUInt16
, value
);
340 return m_type
!= TypeInvalid
;
343 uint16_t asUInt16() const { return m_value
.asInt
; }
345 // These methods rely on the format of encoded byte values.
346 bool isUInt3() { return !(m_value
.asInt
& 0xfff8); }
347 bool isUInt4() { return !(m_value
.asInt
& 0xfff0); }
348 bool isUInt5() { return !(m_value
.asInt
& 0xffe0); }
349 bool isUInt6() { return !(m_value
.asInt
& 0xffc0); }
350 bool isUInt7() { return !(m_value
.asInt
& 0xff80); }
351 bool isUInt8() { return !(m_value
.asInt
& 0xff00); }
352 bool isUInt9() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfe00); }
353 bool isUInt10() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfc00); }
354 bool isUInt12() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xf000); }
355 bool isUInt16() { return m_type
== TypeUInt16
; }
356 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value
.asInt
; }
357 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value
.asInt
; }
358 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value
.asInt
; }
359 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value
.asInt
; }
360 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value
.asInt
; }
361 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value
.asInt
; }
362 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value
.asInt
; }
363 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value
.asInt
; }
364 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value
.asInt
; }
365 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value
.asInt
; }
367 bool isEncodedImm() { return m_type
== TypeEncoded
; }
370 ThumbImmediateType m_type
;
371 ThumbImmediateValue m_value
;
380 SRType_RRX
= SRType_ROR
383 class ShiftTypeAndAmount
{
384 friend class ARMv7Assembler
;
389 m_u
.type
= (ARMShiftType
)0;
393 ShiftTypeAndAmount(ARMShiftType type
, unsigned amount
)
396 m_u
.amount
= amount
& 31;
399 unsigned lo4() { return m_u
.lo4
; }
400 unsigned hi4() { return m_u
.hi4
; }
415 class ARMv7Assembler
{
419 ASSERT(m_jumpsToLink
.isEmpty());
422 typedef ARMRegisters::RegisterID RegisterID
;
423 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID
;
424 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID
;
425 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID
;
427 // (HS, LO, HI, LS) -> (AE, B, A, BE)
428 // (VS, VC) -> (O, NO)
432 ConditionHS
, ConditionCS
= ConditionHS
,
433 ConditionLO
, ConditionCC
= ConditionLO
,
448 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
449 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
450 enum JumpType
{ JumpFixed
= JUMP_ENUM_WITH_SIZE(0, 0),
451 JumpNoCondition
= JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
452 JumpCondition
= JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
453 JumpNoConditionFixedSize
= JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
454 JumpConditionFixedSize
= JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
457 LinkInvalid
= JUMP_ENUM_WITH_SIZE(0, 0),
458 LinkJumpT1
= JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
459 LinkJumpT2
= JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
460 LinkJumpT3
= JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
461 LinkJumpT4
= JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
462 LinkConditionalJumpT4
= JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
463 LinkBX
= JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
464 LinkConditionalBX
= JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
469 LinkRecord(intptr_t from
, intptr_t to
, JumpType type
, Condition condition
)
473 , m_linkType(LinkInvalid
)
474 , m_condition(condition
)
477 intptr_t from() const { return m_from
; }
478 void setFrom(intptr_t from
) { m_from
= from
; }
479 intptr_t to() const { return m_to
; }
480 JumpType
type() const { return m_type
; }
481 JumpLinkType
linkType() const { return m_linkType
; }
482 void setLinkType(JumpLinkType linkType
) { ASSERT(m_linkType
== LinkInvalid
); m_linkType
= linkType
; }
483 Condition
condition() const { return m_condition
; }
485 intptr_t m_from
: 31;
488 JumpLinkType m_linkType
: 8;
489 Condition m_condition
: 16;
495 bool BadReg(RegisterID reg
)
497 return (reg
== ARMRegisters::sp
) || (reg
== ARMRegisters::pc
);
500 uint32_t singleRegisterMask(FPSingleRegisterID rdNum
, int highBitsShift
, int lowBitShift
)
502 uint32_t rdMask
= (rdNum
>> 1) << highBitsShift
;
504 rdMask
|= 1 << lowBitShift
;
508 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum
, int highBitShift
, int lowBitsShift
)
510 uint32_t rdMask
= (rdNum
& 0xf) << lowBitsShift
;
512 rdMask
|= 1 << highBitShift
;
517 OP_ADD_reg_T1
= 0x1800,
518 OP_SUB_reg_T1
= 0x1A00,
519 OP_ADD_imm_T1
= 0x1C00,
520 OP_SUB_imm_T1
= 0x1E00,
521 OP_MOV_imm_T1
= 0x2000,
522 OP_CMP_imm_T1
= 0x2800,
523 OP_ADD_imm_T2
= 0x3000,
524 OP_SUB_imm_T2
= 0x3800,
525 OP_AND_reg_T1
= 0x4000,
526 OP_EOR_reg_T1
= 0x4040,
527 OP_TST_reg_T1
= 0x4200,
528 OP_RSB_imm_T1
= 0x4240,
529 OP_CMP_reg_T1
= 0x4280,
530 OP_ORR_reg_T1
= 0x4300,
531 OP_MVN_reg_T1
= 0x43C0,
532 OP_ADD_reg_T2
= 0x4400,
533 OP_MOV_reg_T1
= 0x4600,
536 OP_STR_reg_T1
= 0x5000,
537 OP_LDR_reg_T1
= 0x5800,
538 OP_LDRH_reg_T1
= 0x5A00,
539 OP_LDRB_reg_T1
= 0x5C00,
540 OP_STR_imm_T1
= 0x6000,
541 OP_LDR_imm_T1
= 0x6800,
542 OP_LDRB_imm_T1
= 0x7800,
543 OP_LDRH_imm_T1
= 0x8800,
544 OP_STR_imm_T2
= 0x9000,
545 OP_LDR_imm_T2
= 0x9800,
546 OP_ADD_SP_imm_T1
= 0xA800,
547 OP_ADD_SP_imm_T2
= 0xB000,
548 OP_SUB_SP_imm_T1
= 0xB080,
557 OP_AND_reg_T2
= 0xEA00,
558 OP_TST_reg_T2
= 0xEA10,
559 OP_ORR_reg_T2
= 0xEA40,
560 OP_ORR_S_reg_T2
= 0xEA50,
561 OP_ASR_imm_T1
= 0xEA4F,
562 OP_LSL_imm_T1
= 0xEA4F,
563 OP_LSR_imm_T1
= 0xEA4F,
564 OP_ROR_imm_T1
= 0xEA4F,
565 OP_MVN_reg_T2
= 0xEA6F,
566 OP_EOR_reg_T2
= 0xEA80,
567 OP_ADD_reg_T3
= 0xEB00,
568 OP_ADD_S_reg_T3
= 0xEB10,
569 OP_SUB_reg_T2
= 0xEBA0,
570 OP_SUB_S_reg_T2
= 0xEBB0,
571 OP_CMP_reg_T2
= 0xEBB0,
574 OP_VMOV_StoC
= 0xEE00,
575 OP_VMOV_CtoS
= 0xEE10,
581 OP_VCVT_FPIVFP
= 0xEEB0,
582 OP_VMOV_IMM_T2
= 0xEEB0,
586 OP_AND_imm_T1
= 0xF000,
588 OP_ORR_imm_T1
= 0xF040,
589 OP_MOV_imm_T2
= 0xF040,
591 OP_EOR_imm_T1
= 0xF080,
592 OP_ADD_imm_T3
= 0xF100,
593 OP_ADD_S_imm_T3
= 0xF110,
595 OP_SUB_imm_T3
= 0xF1A0,
596 OP_SUB_S_imm_T3
= 0xF1B0,
597 OP_CMP_imm_T2
= 0xF1B0,
598 OP_RSB_imm_T2
= 0xF1C0,
599 OP_ADD_imm_T4
= 0xF200,
600 OP_MOV_imm_T3
= 0xF240,
601 OP_SUB_imm_T4
= 0xF2A0,
604 OP_LDRB_imm_T3
= 0xF810,
605 OP_LDRB_reg_T2
= 0xF810,
606 OP_LDRH_reg_T2
= 0xF830,
607 OP_LDRH_imm_T3
= 0xF830,
608 OP_STR_imm_T4
= 0xF840,
609 OP_STR_reg_T2
= 0xF840,
610 OP_LDR_imm_T4
= 0xF850,
611 OP_LDR_reg_T2
= 0xF850,
612 OP_LDRB_imm_T2
= 0xF890,
613 OP_LDRH_imm_T2
= 0xF8B0,
614 OP_STR_imm_T3
= 0xF8C0,
615 OP_LDR_imm_T3
= 0xF8D0,
616 OP_LSL_reg_T2
= 0xFA00,
617 OP_LSR_reg_T2
= 0xFA20,
618 OP_ASR_reg_T2
= 0xFA40,
619 OP_ROR_reg_T2
= 0xFA60,
621 OP_SMULL_T1
= 0xFB80,
625 OP_VADD_T2b
= 0x0A00,
628 OP_VMOV_IMM_T2b
= 0x0A00,
629 OP_VMUL_T2b
= 0x0A00,
631 OP_VMOV_CtoSb
= 0x0A10,
632 OP_VMOV_StoCb
= 0x0A10,
635 OP_VCVT_FPIVFPb
= 0x0A40,
636 OP_VSUB_T2b
= 0x0A40,
643 FourFours(unsigned f3
, unsigned f2
, unsigned f1
, unsigned f0
)
662 class ARMInstructionFormatter
;
665 bool ifThenElseConditionBit(Condition condition
, bool isIf
)
667 return isIf
? (condition
& 1) : !(condition
& 1);
669 uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
, bool inst4if
)
671 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
672 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
673 | (ifThenElseConditionBit(condition
, inst4if
) << 1)
675 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
676 return (condition
<< 4) | mask
;
678 uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
)
680 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
681 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
683 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
684 return (condition
<< 4) | mask
;
686 uint8_t ifThenElse(Condition condition
, bool inst2if
)
688 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
690 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
691 return (condition
<< 4) | mask
;
694 uint8_t ifThenElse(Condition condition
)
697 return (condition
<< 4) | mask
;
702 void add(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
704 // Rd can only be SP if Rn is also SP.
705 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
706 ASSERT(rd
!= ARMRegisters::pc
);
707 ASSERT(rn
!= ARMRegisters::pc
);
708 ASSERT(imm
.isValid());
710 if (rn
== ARMRegisters::sp
) {
711 if (!(rd
& 8) && imm
.isUInt10()) {
712 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1
, rd
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
714 } else if ((rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
715 m_formatter
.oneWordOp9Imm7(OP_ADD_SP_imm_T2
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
718 } else if (!((rd
| rn
) & 8)) {
720 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
722 } else if ((rd
== rn
) && imm
.isUInt8()) {
723 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
728 if (imm
.isEncodedImm())
729 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3
, rn
, rd
, imm
);
731 ASSERT(imm
.isUInt12());
732 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4
, rn
, rd
, imm
);
736 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
738 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
739 ASSERT(rd
!= ARMRegisters::pc
);
740 ASSERT(rn
!= ARMRegisters::pc
);
742 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
745 // NOTE: In an IT block, add doesn't modify the flags register.
746 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
)
749 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rm
, rd
);
751 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rn
, rd
);
752 else if (!((rd
| rn
| rm
) & 8))
753 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
755 add(rd
, rn
, rm
, ShiftTypeAndAmount());
758 // Not allowed in an IT (if then) block.
759 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
761 // Rd can only be SP if Rn is also SP.
762 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
763 ASSERT(rd
!= ARMRegisters::pc
);
764 ASSERT(rn
!= ARMRegisters::pc
);
765 ASSERT(imm
.isEncodedImm());
767 if (!((rd
| rn
) & 8)) {
769 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
771 } else if ((rd
== rn
) && imm
.isUInt8()) {
772 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
777 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3
, rn
, rd
, imm
);
780 // Not allowed in an IT (if then) block?
781 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
783 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
784 ASSERT(rd
!= ARMRegisters::pc
);
785 ASSERT(rn
!= ARMRegisters::pc
);
787 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
790 // Not allowed in an IT (if then) block.
791 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
793 if (!((rd
| rn
| rm
) & 8))
794 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
796 add_S(rd
, rn
, rm
, ShiftTypeAndAmount());
799 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
803 ASSERT(imm
.isEncodedImm());
804 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1
, rn
, rd
, imm
);
807 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
812 m_formatter
.twoWordOp12Reg4FourFours(OP_AND_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
815 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
)
817 if ((rd
== rn
) && !((rd
| rm
) & 8))
818 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rm
, rd
);
819 else if ((rd
== rm
) && !((rd
| rn
) & 8))
820 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rn
, rd
);
822 ARM_and(rd
, rn
, rm
, ShiftTypeAndAmount());
825 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
829 ShiftTypeAndAmount
shift(SRType_ASR
, shiftAmount
);
830 m_formatter
.twoWordOp16FourFours(OP_ASR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
833 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
838 m_formatter
.twoWordOp12Reg4FourFours(OP_ASR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
841 // Only allowed in IT (if then) block if last instruction.
842 ALWAYS_INLINE AssemblerLabel
b()
844 m_formatter
.twoWordOp16Op16(OP_B_T4a
, OP_B_T4b
);
845 return m_formatter
.label();
848 // Only allowed in IT (if then) block if last instruction.
849 ALWAYS_INLINE AssemblerLabel
blx(RegisterID rm
)
851 ASSERT(rm
!= ARMRegisters::pc
);
852 m_formatter
.oneWordOp8RegReg143(OP_BLX
, rm
, (RegisterID
)8);
853 return m_formatter
.label();
856 // Only allowed in IT (if then) block if last instruction.
857 ALWAYS_INLINE AssemblerLabel
bx(RegisterID rm
)
859 m_formatter
.oneWordOp8RegReg143(OP_BX
, rm
, (RegisterID
)0);
860 return m_formatter
.label();
863 void bkpt(uint8_t imm
=0)
865 m_formatter
.oneWordOp8Imm8(OP_BKPT
, imm
);
868 ALWAYS_INLINE
void clz(RegisterID rd
, RegisterID rm
)
872 m_formatter
.twoWordOp12Reg4FourFours(OP_CLZ
, rm
, FourFours(0xf, rd
, 8, rm
));
875 ALWAYS_INLINE
void cmn(RegisterID rn
, ARMThumbImmediate imm
)
877 ASSERT(rn
!= ARMRegisters::pc
);
878 ASSERT(imm
.isEncodedImm());
880 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm
, rn
, (RegisterID
)0xf, imm
);
883 ALWAYS_INLINE
void cmp(RegisterID rn
, ARMThumbImmediate imm
)
885 ASSERT(rn
!= ARMRegisters::pc
);
886 ASSERT(imm
.isEncodedImm());
888 if (!(rn
& 8) && imm
.isUInt8())
889 m_formatter
.oneWordOp5Reg3Imm8(OP_CMP_imm_T1
, rn
, imm
.getUInt8());
891 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2
, rn
, (RegisterID
)0xf, imm
);
894 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
896 ASSERT(rn
!= ARMRegisters::pc
);
898 m_formatter
.twoWordOp12Reg4FourFours(OP_CMP_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
901 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
)
904 cmp(rn
, rm
, ShiftTypeAndAmount());
906 m_formatter
.oneWordOp10Reg3Reg3(OP_CMP_reg_T1
, rm
, rn
);
909 // xor is not spelled with an 'e'. :-(
910 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
914 ASSERT(imm
.isEncodedImm());
915 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1
, rn
, rd
, imm
);
918 // xor is not spelled with an 'e'. :-(
919 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
924 m_formatter
.twoWordOp12Reg4FourFours(OP_EOR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
927 // xor is not spelled with an 'e'. :-(
928 void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
)
930 if ((rd
== rn
) && !((rd
| rm
) & 8))
931 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rm
, rd
);
932 else if ((rd
== rm
) && !((rd
| rn
) & 8))
933 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rn
, rd
);
935 eor(rd
, rn
, rm
, ShiftTypeAndAmount());
938 ALWAYS_INLINE
void it(Condition cond
)
940 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
));
943 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
)
945 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
));
948 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
)
950 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
));
953 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
, bool inst4if
)
955 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
, inst4if
));
958 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
959 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
961 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
962 ASSERT(imm
.isUInt12());
964 if (!((rt
| rn
) & 8) && imm
.isUInt7())
965 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
966 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
967 m_formatter
.oneWordOp5Reg3Imm8(OP_LDR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
969 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, imm
.getUInt12());
972 ALWAYS_INLINE
void ldrCompact(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
974 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
975 ASSERT(imm
.isUInt7());
976 ASSERT(!((rt
| rn
) & 8));
977 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
980 // If index is set, this is a regular offset or a pre-indexed load;
981 // if index is not set then this is a post-index load.
983 // If wback is set rn is updated - this is a pre or post index load,
984 // if wback is not set this is a regular offset memory access.
986 // (-255 <= offset <= 255)
988 // _tmp = _reg + offset
989 // MEM[index ? _tmp : _reg] = REG[rt]
990 // if (wback) REG[rn] = _tmp
991 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
993 ASSERT(rt
!= ARMRegisters::pc
);
994 ASSERT(rn
!= ARMRegisters::pc
);
995 ASSERT(index
|| wback
);
996 ASSERT(!wback
| (rt
!= rn
));
1003 ASSERT((offset
& ~0xff) == 0);
1005 offset
|= (wback
<< 8);
1006 offset
|= (add
<< 9);
1007 offset
|= (index
<< 10);
1008 offset
|= (1 << 11);
1010 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4
, rn
, rt
, offset
);
1013 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1014 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1016 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1017 ASSERT(!BadReg(rm
));
1020 if (!shift
&& !((rt
| rn
| rm
) & 8))
1021 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1
, rm
, rn
, rt
);
1023 m_formatter
.twoWordOp12Reg4FourFours(OP_LDR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1026 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1027 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1029 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1030 ASSERT(imm
.isUInt12());
1032 if (!((rt
| rn
) & 8) && imm
.isUInt6())
1033 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1
, imm
.getUInt6() >> 2, rn
, rt
);
1035 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1038 // If index is set, this is a regular offset or a pre-indexed load;
1039 // if index is not set then this is a post-index load.
1041 // If wback is set rn is updated - this is a pre or post index load,
1042 // if wback is not set this is a regular offset memory access.
1044 // (-255 <= offset <= 255)
1046 // _tmp = _reg + offset
1047 // MEM[index ? _tmp : _reg] = REG[rt]
1048 // if (wback) REG[rn] = _tmp
1049 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1051 ASSERT(rt
!= ARMRegisters::pc
);
1052 ASSERT(rn
!= ARMRegisters::pc
);
1053 ASSERT(index
|| wback
);
1054 ASSERT(!wback
| (rt
!= rn
));
1061 ASSERT((offset
& ~0xff) == 0);
1063 offset
|= (wback
<< 8);
1064 offset
|= (add
<< 9);
1065 offset
|= (index
<< 10);
1066 offset
|= (1 << 11);
1068 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3
, rn
, rt
, offset
);
1071 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1073 ASSERT(!BadReg(rt
)); // Memory hint
1074 ASSERT(rn
!= ARMRegisters::pc
); // LDRH (literal)
1075 ASSERT(!BadReg(rm
));
1078 if (!shift
&& !((rt
| rn
| rm
) & 8))
1079 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1
, rm
, rn
, rt
);
1081 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1084 void ldrb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1086 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1087 ASSERT(imm
.isUInt12());
1089 if (!((rt
| rn
) & 8) && imm
.isUInt5())
1090 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1
, imm
.getUInt5(), rn
, rt
);
1092 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1095 void ldrb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1097 ASSERT(rt
!= ARMRegisters::pc
);
1098 ASSERT(rn
!= ARMRegisters::pc
);
1099 ASSERT(index
|| wback
);
1100 ASSERT(!wback
| (rt
!= rn
));
1108 ASSERT(!(offset
& ~0xff));
1110 offset
|= (wback
<< 8);
1111 offset
|= (add
<< 9);
1112 offset
|= (index
<< 10);
1113 offset
|= (1 << 11);
1115 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3
, rn
, rt
, offset
);
1118 ALWAYS_INLINE
void ldrb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1120 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1121 ASSERT(!BadReg(rm
));
1124 if (!shift
&& !((rt
| rn
| rm
) & 8))
1125 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1
, rm
, rn
, rt
);
1127 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1130 void lsl(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1132 ASSERT(!BadReg(rd
));
1133 ASSERT(!BadReg(rm
));
1134 ShiftTypeAndAmount
shift(SRType_LSL
, shiftAmount
);
1135 m_formatter
.twoWordOp16FourFours(OP_LSL_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1138 ALWAYS_INLINE
void lsl(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1140 ASSERT(!BadReg(rd
));
1141 ASSERT(!BadReg(rn
));
1142 ASSERT(!BadReg(rm
));
1143 m_formatter
.twoWordOp12Reg4FourFours(OP_LSL_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1146 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1148 ASSERT(!BadReg(rd
));
1149 ASSERT(!BadReg(rm
));
1150 ShiftTypeAndAmount
shift(SRType_LSR
, shiftAmount
);
1151 m_formatter
.twoWordOp16FourFours(OP_LSR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1154 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1156 ASSERT(!BadReg(rd
));
1157 ASSERT(!BadReg(rn
));
1158 ASSERT(!BadReg(rm
));
1159 m_formatter
.twoWordOp12Reg4FourFours(OP_LSR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1162 ALWAYS_INLINE
void movT3(RegisterID rd
, ARMThumbImmediate imm
)
1164 ASSERT(imm
.isValid());
1165 ASSERT(!imm
.isEncodedImm());
1166 ASSERT(!BadReg(rd
));
1168 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3
, imm
.m_value
.imm4
, rd
, imm
);
1171 ALWAYS_INLINE
void mov(RegisterID rd
, ARMThumbImmediate imm
)
1173 ASSERT(imm
.isValid());
1174 ASSERT(!BadReg(rd
));
1176 if ((rd
< 8) && imm
.isUInt8())
1177 m_formatter
.oneWordOp5Reg3Imm8(OP_MOV_imm_T1
, rd
, imm
.getUInt8());
1178 else if (imm
.isEncodedImm())
1179 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2
, 0xf, rd
, imm
);
1184 ALWAYS_INLINE
void mov(RegisterID rd
, RegisterID rm
)
1186 m_formatter
.oneWordOp8RegReg143(OP_MOV_reg_T1
, rm
, rd
);
1189 ALWAYS_INLINE
void movt(RegisterID rd
, ARMThumbImmediate imm
)
1191 ASSERT(imm
.isUInt16());
1192 ASSERT(!BadReg(rd
));
1193 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT
, imm
.m_value
.imm4
, rd
, imm
);
1196 ALWAYS_INLINE
void mvn(RegisterID rd
, ARMThumbImmediate imm
)
1198 ASSERT(imm
.isEncodedImm());
1199 ASSERT(!BadReg(rd
));
1201 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm
, 0xf, rd
, imm
);
1204 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
, ShiftTypeAndAmount shift
)
1206 ASSERT(!BadReg(rd
));
1207 ASSERT(!BadReg(rm
));
1208 m_formatter
.twoWordOp16FourFours(OP_MVN_reg_T2
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1211 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
)
1213 if (!((rd
| rm
) & 8))
1214 m_formatter
.oneWordOp10Reg3Reg3(OP_MVN_reg_T1
, rm
, rd
);
1216 mvn(rd
, rm
, ShiftTypeAndAmount());
1219 ALWAYS_INLINE
void neg(RegisterID rd
, RegisterID rm
)
1221 ARMThumbImmediate zero
= ARMThumbImmediate::makeUInt12(0);
1225 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1227 ASSERT(!BadReg(rd
));
1228 ASSERT(!BadReg(rn
));
1229 ASSERT(imm
.isEncodedImm());
1230 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1
, rn
, rd
, imm
);
1233 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1235 ASSERT(!BadReg(rd
));
1236 ASSERT(!BadReg(rn
));
1237 ASSERT(!BadReg(rm
));
1238 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1241 void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1243 if ((rd
== rn
) && !((rd
| rm
) & 8))
1244 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1245 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1246 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1248 orr(rd
, rn
, rm
, ShiftTypeAndAmount());
1251 ALWAYS_INLINE
void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1253 ASSERT(!BadReg(rd
));
1254 ASSERT(!BadReg(rn
));
1255 ASSERT(!BadReg(rm
));
1256 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1259 void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1261 if ((rd
== rn
) && !((rd
| rm
) & 8))
1262 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1263 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1264 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1266 orr_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1269 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1271 ASSERT(!BadReg(rd
));
1272 ASSERT(!BadReg(rm
));
1273 ShiftTypeAndAmount
shift(SRType_ROR
, shiftAmount
);
1274 m_formatter
.twoWordOp16FourFours(OP_ROR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1277 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1279 ASSERT(!BadReg(rd
));
1280 ASSERT(!BadReg(rn
));
1281 ASSERT(!BadReg(rm
));
1282 m_formatter
.twoWordOp12Reg4FourFours(OP_ROR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1285 ALWAYS_INLINE
void smull(RegisterID rdLo
, RegisterID rdHi
, RegisterID rn
, RegisterID rm
)
1287 ASSERT(!BadReg(rdLo
));
1288 ASSERT(!BadReg(rdHi
));
1289 ASSERT(!BadReg(rn
));
1290 ASSERT(!BadReg(rm
));
1291 ASSERT(rdLo
!= rdHi
);
1292 m_formatter
.twoWordOp12Reg4FourFours(OP_SMULL_T1
, rn
, FourFours(rdLo
, rdHi
, 0, rm
));
1295 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1296 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1298 ASSERT(rt
!= ARMRegisters::pc
);
1299 ASSERT(rn
!= ARMRegisters::pc
);
1300 ASSERT(imm
.isUInt12());
1302 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1303 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1304 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1305 m_formatter
.oneWordOp5Reg3Imm8(OP_STR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1307 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3
, rn
, rt
, imm
.getUInt12());
1310 // If index is set, this is a regular offset or a pre-indexed store;
1311 // if index is not set then is is a post-index store.
1313 // If wback is set rn is updated - this is a pre or post index store,
1314 // if wback is not set this is a regular offset memory access.
1316 // (-255 <= offset <= 255)
1318 // _tmp = _reg + offset
1319 // MEM[index ? _tmp : _reg] = REG[rt]
1320 // if (wback) REG[rn] = _tmp
1321 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1323 ASSERT(rt
!= ARMRegisters::pc
);
1324 ASSERT(rn
!= ARMRegisters::pc
);
1325 ASSERT(index
|| wback
);
1326 ASSERT(!wback
| (rt
!= rn
));
1333 ASSERT((offset
& ~0xff) == 0);
1335 offset
|= (wback
<< 8);
1336 offset
|= (add
<< 9);
1337 offset
|= (index
<< 10);
1338 offset
|= (1 << 11);
1340 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4
, rn
, rt
, offset
);
1343 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1344 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1346 ASSERT(rn
!= ARMRegisters::pc
);
1347 ASSERT(!BadReg(rm
));
1350 if (!shift
&& !((rt
| rn
| rm
) & 8))
1351 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1
, rm
, rn
, rt
);
1353 m_formatter
.twoWordOp12Reg4FourFours(OP_STR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1356 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1358 // Rd can only be SP if Rn is also SP.
1359 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1360 ASSERT(rd
!= ARMRegisters::pc
);
1361 ASSERT(rn
!= ARMRegisters::pc
);
1362 ASSERT(imm
.isValid());
1364 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1365 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1367 } else if (!((rd
| rn
) & 8)) {
1368 if (imm
.isUInt3()) {
1369 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1371 } else if ((rd
== rn
) && imm
.isUInt8()) {
1372 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1377 if (imm
.isEncodedImm())
1378 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3
, rn
, rd
, imm
);
1380 ASSERT(imm
.isUInt12());
1381 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4
, rn
, rd
, imm
);
1385 ALWAYS_INLINE
void sub(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1387 ASSERT(rd
!= ARMRegisters::pc
);
1388 ASSERT(rn
!= ARMRegisters::pc
);
1389 ASSERT(imm
.isValid());
1390 ASSERT(imm
.isUInt12());
1392 if (!((rd
| rn
) & 8) && !imm
.getUInt12())
1393 m_formatter
.oneWordOp10Reg3Reg3(OP_RSB_imm_T1
, rn
, rd
);
1395 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2
, rn
, rd
, imm
);
1398 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1400 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1401 ASSERT(rd
!= ARMRegisters::pc
);
1402 ASSERT(rn
!= ARMRegisters::pc
);
1403 ASSERT(!BadReg(rm
));
1404 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1407 // NOTE: In an IT block, add doesn't modify the flags register.
1408 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1410 if (!((rd
| rn
| rm
) & 8))
1411 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1413 sub(rd
, rn
, rm
, ShiftTypeAndAmount());
1416 // Not allowed in an IT (if then) block.
1417 void sub_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1419 // Rd can only be SP if Rn is also SP.
1420 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1421 ASSERT(rd
!= ARMRegisters::pc
);
1422 ASSERT(rn
!= ARMRegisters::pc
);
1423 ASSERT(imm
.isValid());
1425 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1426 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1428 } else if (!((rd
| rn
) & 8)) {
1429 if (imm
.isUInt3()) {
1430 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1432 } else if ((rd
== rn
) && imm
.isUInt8()) {
1433 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1438 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3
, rn
, rd
, imm
);
1441 // Not allowed in an IT (if then) block?
1442 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1444 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1445 ASSERT(rd
!= ARMRegisters::pc
);
1446 ASSERT(rn
!= ARMRegisters::pc
);
1447 ASSERT(!BadReg(rm
));
1448 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1451 // Not allowed in an IT (if then) block.
1452 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1454 if (!((rd
| rn
| rm
) & 8))
1455 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1457 sub_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1460 ALWAYS_INLINE
void tst(RegisterID rn
, ARMThumbImmediate imm
)
1462 ASSERT(!BadReg(rn
));
1463 ASSERT(imm
.isEncodedImm());
1465 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm
, rn
, (RegisterID
)0xf, imm
);
1468 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1470 ASSERT(!BadReg(rn
));
1471 ASSERT(!BadReg(rm
));
1472 m_formatter
.twoWordOp12Reg4FourFours(OP_TST_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
1475 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
)
1478 tst(rn
, rm
, ShiftTypeAndAmount());
1480 m_formatter
.oneWordOp10Reg3Reg3(OP_TST_reg_T1
, rm
, rn
);
1483 void vadd_F64(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1485 m_formatter
.vfpOp(OP_VADD_T2
, OP_VADD_T2b
, true, rn
, rd
, rm
);
1488 void vcmp_F64(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1490 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(4), rd
, rm
);
1493 void vcmpz_F64(FPDoubleRegisterID rd
)
1495 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(5), rd
, VFPOperand(0));
1498 void vcvt_F64_S32(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
1500 // boolean values are 64bit (toInt, unsigned, roundZero)
1501 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(false, false, false), rd
, rm
);
1504 void vcvtr_S32_F64(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1506 // boolean values are 64bit (toInt, unsigned, roundZero)
1507 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, false, true), rd
, rm
);
1510 void vdiv_F64(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1512 m_formatter
.vfpOp(OP_VDIV
, OP_VDIVb
, true, rn
, rd
, rm
);
1515 void vldr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1517 m_formatter
.vfpMemOp(OP_VLDR
, OP_VLDRb
, true, rn
, rd
, imm
);
1520 void vmov(RegisterID rd
, FPSingleRegisterID rn
)
1522 ASSERT(!BadReg(rd
));
1523 m_formatter
.vfpOp(OP_VMOV_CtoS
, OP_VMOV_CtoSb
, false, rn
, rd
, VFPOperand(0));
1526 void vmov(FPSingleRegisterID rd
, RegisterID rn
)
1528 ASSERT(!BadReg(rn
));
1529 m_formatter
.vfpOp(OP_VMOV_StoC
, OP_VMOV_StoCb
, false, rd
, rn
, VFPOperand(0));
1532 void vmrs(RegisterID reg
= ARMRegisters::pc
)
1534 ASSERT(reg
!= ARMRegisters::sp
);
1535 m_formatter
.vfpOp(OP_VMRS
, OP_VMRSb
, false, VFPOperand(1), VFPOperand(0x10 | reg
), VFPOperand(0));
1538 void vmul_F64(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1540 m_formatter
.vfpOp(OP_VMUL_T2
, OP_VMUL_T2b
, true, rn
, rd
, rm
);
1543 void vstr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1545 m_formatter
.vfpMemOp(OP_VSTR
, OP_VSTRb
, true, rn
, rd
, imm
);
1548 void vsub_F64(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1550 m_formatter
.vfpOp(OP_VSUB_T2
, OP_VSUB_T2b
, true, rn
, rd
, rm
);
1555 m_formatter
.oneWordOp8Imm8(OP_NOP_T1
, 0);
1558 AssemblerLabel
label()
1560 return m_formatter
.label();
1563 AssemblerLabel
align(int alignment
)
1565 while (!m_formatter
.isAligned(alignment
))
1571 static void* getRelocatedAddress(void* code
, AssemblerLabel label
)
1573 ASSERT(label
.isSet());
1574 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + label
.m_offset
);
1577 static int getDifferenceBetweenLabels(AssemblerLabel a
, AssemblerLabel b
)
1579 return b
.m_offset
- a
.m_offset
;
1582 int executableOffsetFor(int location
)
1586 return static_cast<int32_t*>(m_formatter
.data())[location
/ sizeof(int32_t) - 1];
1589 int jumpSizeDelta(JumpType jumpType
, JumpLinkType jumpLinkType
) { return JUMP_ENUM_SIZE(jumpType
) - JUMP_ENUM_SIZE(jumpLinkType
); }
1591 // Assembler admin methods:
1593 static ALWAYS_INLINE
bool linkRecordSourceComparator(const LinkRecord
& a
, const LinkRecord
& b
)
1595 return a
.from() < b
.from();
1598 bool canCompact(JumpType jumpType
)
1600 // The following cannot be compacted:
1601 // JumpFixed: represents custom jump sequence
1602 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
1603 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
1604 return (jumpType
== JumpNoCondition
) || (jumpType
== JumpCondition
);
1607 JumpLinkType
computeJumpType(JumpType jumpType
, const uint8_t* from
, const uint8_t* to
)
1609 if (jumpType
== JumpFixed
)
1612 // for patchable jump we must leave space for the longest code sequence
1613 if (jumpType
== JumpNoConditionFixedSize
)
1615 if (jumpType
== JumpConditionFixedSize
)
1616 return LinkConditionalBX
;
1618 const int paddingSize
= JUMP_ENUM_SIZE(jumpType
);
1619 bool mayTriggerErrata
= false;
1621 if (jumpType
== JumpCondition
) {
1622 // 2-byte conditional T1
1623 const uint16_t* jumpT1Location
= reinterpret_cast<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT1
)));
1624 if (canBeJumpT1(jumpT1Location
, to
))
1626 // 4-byte conditional T3
1627 const uint16_t* jumpT3Location
= reinterpret_cast<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT3
)));
1628 if (canBeJumpT3(jumpT3Location
, to
, mayTriggerErrata
)) {
1629 if (!mayTriggerErrata
)
1632 // 4-byte conditional T4 with IT
1633 const uint16_t* conditionalJumpT4Location
=
1634 reinterpret_cast<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkConditionalJumpT4
)));
1635 if (canBeJumpT4(conditionalJumpT4Location
, to
, mayTriggerErrata
)) {
1636 if (!mayTriggerErrata
)
1637 return LinkConditionalJumpT4
;
1640 // 2-byte unconditional T2
1641 const uint16_t* jumpT2Location
= reinterpret_cast<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT2
)));
1642 if (canBeJumpT2(jumpT2Location
, to
))
1644 // 4-byte unconditional T4
1645 const uint16_t* jumpT4Location
= reinterpret_cast<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT4
)));
1646 if (canBeJumpT4(jumpT4Location
, to
, mayTriggerErrata
)) {
1647 if (!mayTriggerErrata
)
1650 // use long jump sequence
1654 ASSERT(jumpType
== JumpCondition
);
1655 return LinkConditionalBX
;
1658 JumpLinkType
computeJumpType(LinkRecord
& record
, const uint8_t* from
, const uint8_t* to
)
1660 JumpLinkType linkType
= computeJumpType(record
.type(), from
, to
);
1661 record
.setLinkType(linkType
);
1665 void recordLinkOffsets(int32_t regionStart
, int32_t regionEnd
, int32_t offset
)
1667 int32_t ptr
= regionStart
/ sizeof(int32_t);
1668 const int32_t end
= regionEnd
/ sizeof(int32_t);
1669 int32_t* offsets
= static_cast<int32_t*>(m_formatter
.data());
1671 offsets
[ptr
++] = offset
;
1674 Vector
<LinkRecord
>& jumpsToLink()
1676 std::sort(m_jumpsToLink
.begin(), m_jumpsToLink
.end(), linkRecordSourceComparator
);
1677 return m_jumpsToLink
;
1680 void ALWAYS_INLINE
link(LinkRecord
& record
, uint8_t* from
, uint8_t* to
)
1682 switch (record
.linkType()) {
1684 linkJumpT1(record
.condition(), reinterpret_cast<uint16_t*>(from
), to
);
1687 linkJumpT2(reinterpret_cast<uint16_t*>(from
), to
);
1690 linkJumpT3(record
.condition(), reinterpret_cast<uint16_t*>(from
), to
);
1693 linkJumpT4(reinterpret_cast<uint16_t*>(from
), to
);
1695 case LinkConditionalJumpT4
:
1696 linkConditionalJumpT4(record
.condition(), reinterpret_cast<uint16_t*>(from
), to
);
1698 case LinkConditionalBX
:
1699 linkConditionalBX(record
.condition(), reinterpret_cast<uint16_t*>(from
), to
);
1702 linkBX(reinterpret_cast<uint16_t*>(from
), to
);
1705 ASSERT_NOT_REACHED();
1710 void* unlinkedCode() { return m_formatter
.data(); }
1711 size_t codeSize() const { return m_formatter
.codeSize(); }
1713 static unsigned getCallReturnOffset(AssemblerLabel call
)
1715 ASSERT(call
.isSet());
1716 return call
.m_offset
;
1719 // Linking & patching:
1721 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1722 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1723 // code has been finalized it is (platform support permitting) within a non-
1724 // writable region of memory; to modify the code in an execute-only execuable
1725 // pool the 'repatch' and 'relink' methods should be used.
1727 void linkJump(AssemblerLabel from
, AssemblerLabel to
, JumpType type
, Condition condition
)
1730 ASSERT(from
.isSet());
1731 m_jumpsToLink
.append(LinkRecord(from
.m_offset
, to
.m_offset
, type
, condition
));
1734 static void linkJump(void* code
, AssemblerLabel from
, void* to
)
1736 ASSERT(from
.isSet());
1738 uint16_t* location
= reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
);
1739 linkJumpAbsolute(location
, to
);
1742 static void linkCall(void* code
, AssemblerLabel from
, void* to
)
1744 ASSERT(!(reinterpret_cast<intptr_t>(code
) & 1));
1745 ASSERT(from
.isSet());
1746 ASSERT(reinterpret_cast<intptr_t>(to
) & 1);
1748 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
) - 1, to
);
1751 static void linkPointer(void* code
, AssemblerLabel where
, void* value
)
1753 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
);
1756 static void relinkJump(void* from
, void* to
)
1758 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
1759 ASSERT(!(reinterpret_cast<intptr_t>(to
) & 1));
1761 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from
), to
);
1763 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from
) - 5, 5 * sizeof(uint16_t));
1766 static void relinkCall(void* from
, void* to
)
1768 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
1769 ASSERT(reinterpret_cast<intptr_t>(to
) & 1);
1771 setPointer(reinterpret_cast<uint16_t*>(from
) - 1, to
);
1774 static void repatchInt32(void* where
, int32_t value
)
1776 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
1778 setInt32(where
, value
);
1781 static void repatchCompact(void* where
, int32_t value
)
1784 ASSERT(ARMThumbImmediate::makeUInt12(value
).isUInt7());
1785 setUInt7ForLoad(where
, ARMThumbImmediate::makeUInt12(value
));
1788 static void repatchPointer(void* where
, void* value
)
1790 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
1792 setPointer(where
, value
);
1795 static void* readPointer(void* where
)
1797 return reinterpret_cast<void*>(readInt32(where
));
1801 // VFP operations commonly take one or more 5-bit operands, typically representing a
1802 // floating point register number. This will commonly be encoded in the instruction
1803 // in two parts, with one single bit field, and one 4-bit field. In the case of
1804 // double precision operands the high bit of the register number will be encoded
1805 // separately, and for single precision operands the high bit of the register number
1806 // will be encoded individually.
1807 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
1808 // field to be encoded together in the instruction (the low 4-bits of a double
1809 // register number, or the high 4-bits of a single register number), and bit 4
1810 // contains the bit value to be encoded individually.
1812 explicit VFPOperand(uint32_t value
)
1815 ASSERT(!(m_value
& ~0x1f));
1818 VFPOperand(FPDoubleRegisterID reg
)
1823 VFPOperand(RegisterID reg
)
1828 VFPOperand(FPSingleRegisterID reg
)
1829 : m_value(((reg
& 1) << 4) | (reg
>> 1)) // rotate the lowest bit of 'reg' to the top.
1835 return m_value
>> 4;
1840 return m_value
& 0xf;
1846 VFPOperand
vcvtOp(bool toInteger
, bool isUnsigned
, bool isRoundZero
)
1848 // Cannot specify rounding when converting to float.
1849 ASSERT(toInteger
|| !isRoundZero
);
1853 // opc2 indicates both toInteger & isUnsigned.
1854 op
|= isUnsigned
? 0x4 : 0x5;
1855 // 'op' field in instruction is isRoundZero
1859 // 'op' field in instruction is isUnsigned
1863 return VFPOperand(op
);
1866 static void setInt32(void* code
, uint32_t value
)
1868 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
1869 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
1871 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
));
1872 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
>> 16));
1873 location
[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
1874 location
[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-3] >> 8) & 0xf, lo16
);
1875 location
[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
1876 location
[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-1] >> 8) & 0xf, hi16
);
1878 ExecutableAllocator::cacheFlush(location
- 4, 4 * sizeof(uint16_t));
1881 static int32_t readInt32(void* code
)
1883 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
1884 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
1886 ARMThumbImmediate lo16
;
1887 ARMThumbImmediate hi16
;
1888 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16
, location
[-4]);
1889 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16
, location
[-3]);
1890 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16
, location
[-2]);
1891 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16
, location
[-1]);
1892 uint32_t result
= hi16
.asUInt16();
1894 result
|= lo16
.asUInt16();
1895 return static_cast<int32_t>(result
);
1898 static void setUInt7ForLoad(void* code
, ARMThumbImmediate imm
)
1900 // Requires us to have planted a LDR_imm_T1
1901 ASSERT(imm
.isValid());
1902 ASSERT(imm
.isUInt7());
1903 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
1904 location
[0] |= (imm
.getUInt7() >> 2) << 6;
1905 ExecutableAllocator::cacheFlush(location
, sizeof(uint16_t));
1908 static void setPointer(void* code
, void* value
)
1910 setInt32(code
, reinterpret_cast<uint32_t>(value
));
1913 static bool isB(void* address
)
1915 uint16_t* instruction
= static_cast<uint16_t*>(address
);
1916 return ((instruction
[0] & 0xf800) == OP_B_T4a
) && ((instruction
[1] & 0xd000) == OP_B_T4b
);
1919 static bool isBX(void* address
)
1921 uint16_t* instruction
= static_cast<uint16_t*>(address
);
1922 return (instruction
[0] & 0xff87) == OP_BX
;
1925 static bool isMOV_imm_T3(void* address
)
1927 uint16_t* instruction
= static_cast<uint16_t*>(address
);
1928 return ((instruction
[0] & 0xFBF0) == OP_MOV_imm_T3
) && ((instruction
[1] & 0x8000) == 0);
1931 static bool isMOVT(void* address
)
1933 uint16_t* instruction
= static_cast<uint16_t*>(address
);
1934 return ((instruction
[0] & 0xFBF0) == OP_MOVT
) && ((instruction
[1] & 0x8000) == 0);
1937 static bool isNOP_T1(void* address
)
1939 uint16_t* instruction
= static_cast<uint16_t*>(address
);
1940 return instruction
[0] == OP_NOP_T1
;
1943 static bool isNOP_T2(void* address
)
1945 uint16_t* instruction
= static_cast<uint16_t*>(address
);
1946 return (instruction
[0] == OP_NOP_T2a
) && (instruction
[1] == OP_NOP_T2b
);
1949 static bool canBeJumpT1(const uint16_t* instruction
, const void* target
)
1951 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
1952 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
1954 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
1955 // It does not appear to be documented in the ARM ARM (big surprise), but
1956 // for OP_B_T1 the branch displacement encoded in the instruction is 2
1957 // less than the actual displacement.
1959 return ((relative
<< 23) >> 23) == relative
;
1962 static bool canBeJumpT2(const uint16_t* instruction
, const void* target
)
1964 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
1965 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
1967 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
1968 // It does not appear to be documented in the ARM ARM (big surprise), but
1969 // for OP_B_T2 the branch displacement encoded in the instruction is 2
1970 // less than the actual displacement.
1972 return ((relative
<< 20) >> 20) == relative
;
1975 static bool canBeJumpT3(const uint16_t* instruction
, const void* target
, bool& mayTriggerErrata
)
1977 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
1978 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
1980 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
1981 // From Cortex-A8 errata:
1982 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
1983 // the target of the branch falls within the first region it is
1984 // possible for the processor to incorrectly determine the branch
1985 // instruction, and it is also possible in some cases for the processor
1986 // to enter a deadlock state.
1987 // The instruction is spanning two pages if it ends at an address ending 0x002
1988 bool spansTwo4K
= ((reinterpret_cast<intptr_t>(instruction
) & 0xfff) == 0x002);
1989 mayTriggerErrata
= spansTwo4K
;
1990 // The target is in the first page if the jump branch back by [3..0x1002] bytes
1991 bool targetInFirstPage
= (relative
>= -0x1002) && (relative
< -2);
1992 bool wouldTriggerA8Errata
= spansTwo4K
&& targetInFirstPage
;
1993 return ((relative
<< 11) >> 11) == relative
&& !wouldTriggerA8Errata
;
1996 static bool canBeJumpT4(const uint16_t* instruction
, const void* target
, bool& mayTriggerErrata
)
1998 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
1999 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2001 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2002 // From Cortex-A8 errata:
2003 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
2004 // the target of the branch falls within the first region it is
2005 // possible for the processor to incorrectly determine the branch
2006 // instruction, and it is also possible in some cases for the processor
2007 // to enter a deadlock state.
2008 // The instruction is spanning two pages if it ends at an address ending 0x002
2009 bool spansTwo4K
= ((reinterpret_cast<intptr_t>(instruction
) & 0xfff) == 0x002);
2010 mayTriggerErrata
= spansTwo4K
;
2011 // The target is in the first page if the jump branch back by [3..0x1002] bytes
2012 bool targetInFirstPage
= (relative
>= -0x1002) && (relative
< -2);
2013 bool wouldTriggerA8Errata
= spansTwo4K
&& targetInFirstPage
;
2014 return ((relative
<< 7) >> 7) == relative
&& !wouldTriggerA8Errata
;
2017 void linkJumpT1(Condition cond
, uint16_t* instruction
, void* target
)
2019 // FIMXE: this should be up in the MacroAssembler layer. :-(
2020 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2021 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2022 ASSERT(canBeJumpT1(instruction
, target
));
2024 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2025 // It does not appear to be documented in the ARM ARM (big surprise), but
2026 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2027 // less than the actual displacement.
2030 // All branch offsets should be an even distance.
2031 ASSERT(!(relative
& 1));
2032 instruction
[-1] = OP_B_T1
| ((cond
& 0xf) << 8) | ((relative
& 0x1fe) >> 1);
2035 static void linkJumpT2(uint16_t* instruction
, void* target
)
2037 // FIMXE: this should be up in the MacroAssembler layer. :-(
2038 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2039 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2040 ASSERT(canBeJumpT2(instruction
, target
));
2042 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2043 // It does not appear to be documented in the ARM ARM (big surprise), but
2044 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2045 // less than the actual displacement.
2048 // All branch offsets should be an even distance.
2049 ASSERT(!(relative
& 1));
2050 instruction
[-1] = OP_B_T2
| ((relative
& 0xffe) >> 1);
2053 void linkJumpT3(Condition cond
, uint16_t* instruction
, void* target
)
2055 // FIMXE: this should be up in the MacroAssembler layer. :-(
2056 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2057 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2059 UNUSED_PARAM(scratch
);
2060 ASSERT(canBeJumpT3(instruction
, target
, scratch
));
2062 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2064 // All branch offsets should be an even distance.
2065 ASSERT(!(relative
& 1));
2066 instruction
[-2] = OP_B_T3a
| ((relative
& 0x100000) >> 10) | ((cond
& 0xf) << 6) | ((relative
& 0x3f000) >> 12);
2067 instruction
[-1] = OP_B_T3b
| ((relative
& 0x80000) >> 8) | ((relative
& 0x40000) >> 5) | ((relative
& 0xffe) >> 1);
2070 static void linkJumpT4(uint16_t* instruction
, void* target
)
2072 // FIMXE: this should be up in the MacroAssembler layer. :-(
2073 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2074 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2076 UNUSED_PARAM(scratch
);
2077 ASSERT(canBeJumpT4(instruction
, target
, scratch
));
2079 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2080 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2082 relative
^= 0xC00000;
2084 // All branch offsets should be an even distance.
2085 ASSERT(!(relative
& 1));
2086 instruction
[-2] = OP_B_T4a
| ((relative
& 0x1000000) >> 14) | ((relative
& 0x3ff000) >> 12);
2087 instruction
[-1] = OP_B_T4b
| ((relative
& 0x800000) >> 10) | ((relative
& 0x400000) >> 11) | ((relative
& 0xffe) >> 1);
2090 void linkConditionalJumpT4(Condition cond
, uint16_t* instruction
, void* target
)
2092 // FIMXE: this should be up in the MacroAssembler layer. :-(
2093 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2094 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2096 instruction
[-3] = ifThenElse(cond
) | OP_IT
;
2097 linkJumpT4(instruction
, target
);
2100 static void linkBX(uint16_t* instruction
, void* target
)
2102 // FIMXE: this should be up in the MacroAssembler layer. :-(
2103 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2104 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2106 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2107 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2108 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2109 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2110 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2111 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2112 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2113 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2116 void linkConditionalBX(Condition cond
, uint16_t* instruction
, void* target
)
2118 // FIMXE: this should be up in the MacroAssembler layer. :-(
2119 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2120 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2122 linkBX(instruction
, target
);
2123 instruction
[-6] = ifThenElse(cond
, true, true) | OP_IT
;
2126 static void linkJumpAbsolute(uint16_t* instruction
, void* target
)
2128 // FIMXE: this should be up in the MacroAssembler layer. :-(
2129 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2130 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2132 ASSERT((isMOV_imm_T3(instruction
- 5) && isMOVT(instruction
- 3) && isBX(instruction
- 1))
2133 || (isNOP_T1(instruction
- 5) && isNOP_T2(instruction
- 4) && isB(instruction
- 2)));
2136 if (canBeJumpT4(instruction
, target
, scratch
)) {
2137 // There may be a better way to fix this, but right now put the NOPs first, since in the
2138 // case of an conditional branch this will be coming after an ITTT predicating *three*
2139 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2140 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2141 // actually be the second half of a 2-word op.
2142 instruction
[-5] = OP_NOP_T1
;
2143 instruction
[-4] = OP_NOP_T2a
;
2144 instruction
[-3] = OP_NOP_T2b
;
2145 linkJumpT4(instruction
, target
);
2147 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2148 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2149 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2150 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2151 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2152 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2153 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2154 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2158 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op
, ARMThumbImmediate imm
)
2160 return op
| (imm
.m_value
.i
<< 10) | imm
.m_value
.imm4
;
2163 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate
& result
, uint16_t value
)
2165 result
.m_value
.i
= (value
>> 10) & 1;
2166 result
.m_value
.imm4
= value
& 15;
2169 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd
, ARMThumbImmediate imm
)
2171 return (imm
.m_value
.imm3
<< 12) | (rd
<< 8) | imm
.m_value
.imm8
;
2174 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate
& result
, uint16_t value
)
2176 result
.m_value
.imm3
= (value
>> 12) & 7;
2177 result
.m_value
.imm8
= value
& 255;
2180 class ARMInstructionFormatter
{
2182 ALWAYS_INLINE
void oneWordOp5Reg3Imm8(OpcodeID op
, RegisterID rd
, uint8_t imm
)
2184 m_buffer
.putShort(op
| (rd
<< 8) | imm
);
2187 ALWAYS_INLINE
void oneWordOp5Imm5Reg3Reg3(OpcodeID op
, uint8_t imm
, RegisterID reg1
, RegisterID reg2
)
2189 m_buffer
.putShort(op
| (imm
<< 6) | (reg1
<< 3) | reg2
);
2192 ALWAYS_INLINE
void oneWordOp7Reg3Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
, RegisterID reg3
)
2194 m_buffer
.putShort(op
| (reg1
<< 6) | (reg2
<< 3) | reg3
);
2197 ALWAYS_INLINE
void oneWordOp8Imm8(OpcodeID op
, uint8_t imm
)
2199 m_buffer
.putShort(op
| imm
);
2202 ALWAYS_INLINE
void oneWordOp8RegReg143(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2204 m_buffer
.putShort(op
| ((reg2
& 8) << 4) | (reg1
<< 3) | (reg2
& 7));
2207 ALWAYS_INLINE
void oneWordOp9Imm7(OpcodeID op
, uint8_t imm
)
2209 m_buffer
.putShort(op
| imm
);
2212 ALWAYS_INLINE
void oneWordOp10Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2214 m_buffer
.putShort(op
| (reg1
<< 3) | reg2
);
2217 ALWAYS_INLINE
void twoWordOp12Reg4FourFours(OpcodeID1 op
, RegisterID reg
, FourFours ff
)
2219 m_buffer
.putShort(op
| reg
);
2220 m_buffer
.putShort(ff
.m_u
.value
);
2223 ALWAYS_INLINE
void twoWordOp16FourFours(OpcodeID1 op
, FourFours ff
)
2225 m_buffer
.putShort(op
);
2226 m_buffer
.putShort(ff
.m_u
.value
);
2229 ALWAYS_INLINE
void twoWordOp16Op16(OpcodeID1 op1
, OpcodeID2 op2
)
2231 m_buffer
.putShort(op1
);
2232 m_buffer
.putShort(op2
);
2235 ALWAYS_INLINE
void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op
, int imm4
, RegisterID rd
, ARMThumbImmediate imm
)
2237 ARMThumbImmediate newImm
= imm
;
2238 newImm
.m_value
.imm4
= imm4
;
2240 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op
, newImm
));
2241 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, newImm
));
2244 ALWAYS_INLINE
void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm
)
2246 m_buffer
.putShort(op
| reg1
);
2247 m_buffer
.putShort((reg2
<< 12) | imm
);
2250 // Formats up instructions of the pattern:
2251 // 111111111B11aaaa:bbbb222SA2C2cccc
2252 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2253 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2254 ALWAYS_INLINE
void vfpOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, VFPOperand a
, VFPOperand b
, VFPOperand c
)
2256 ASSERT(!(op1
& 0x004f));
2257 ASSERT(!(op2
& 0xf1af));
2258 m_buffer
.putShort(op1
| b
.bits1() << 6 | a
.bits4());
2259 m_buffer
.putShort(op2
| b
.bits4() << 12 | size
<< 8 | a
.bits1() << 7 | c
.bits1() << 5 | c
.bits4());
2262 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2263 // (i.e. +/-(0..255) 32-bit words)
2264 ALWAYS_INLINE
void vfpMemOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, RegisterID rn
, VFPOperand rd
, int32_t imm
)
2272 uint32_t offset
= imm
;
2273 ASSERT(!(offset
& ~0x3fc));
2276 m_buffer
.putShort(op1
| (up
<< 7) | rd
.bits1() << 6 | rn
);
2277 m_buffer
.putShort(op2
| rd
.bits4() << 12 | size
<< 8 | offset
);
// Administrative methods:

// Number of bytes of code currently emitted into the underlying buffer.
size_t codeSize() const { return m_buffer.codeSize(); }
// Label identifying the buffer's current (end-of-stream) position.
AssemblerLabel label() const { return m_buffer.label(); }
// True if the buffer's current position is aligned to 'alignment' bytes.
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
// Raw pointer to the buffered instruction stream.
void* data() const { return m_buffer.data(); }
// Forwards the buffer's debug offset.
unsigned debugOffset() { return m_buffer.debugOffset(); }
2292 AssemblerBuffer m_buffer
;
2295 Vector
<LinkRecord
> m_jumpsToLink
;
2296 Vector
<int32_t> m_offsets
;
2301 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2303 #endif // ARMAssembler_h