2 * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32 #include "AssemblerBuffer.h"
34 #include <wtf/Assertions.h>
35 #include <wtf/Vector.h>
40 namespace ARMRegisters
{
49 r7
, fp
= r7
, // frame pointer
51 r9
, sb
= r9
, // static base
52 r10
, sl
= r10
, // stack limit
128 } FPDoubleRegisterID
;
165 inline FPSingleRegisterID
asSingle(FPDoubleRegisterID reg
)
168 return (FPSingleRegisterID
)(reg
<< 1);
171 inline FPDoubleRegisterID
asDouble(FPSingleRegisterID reg
)
174 return (FPDoubleRegisterID
)(reg
>> 1);
178 #define FOR_EACH_CPU_REGISTER(V) \
179 FOR_EACH_CPU_GPREGISTER(V) \
180 FOR_EACH_CPU_SPECIAL_REGISTER(V) \
181 FOR_EACH_CPU_FPREGISTER(V)
183 #define FOR_EACH_CPU_GPREGISTER(V) \
201 #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
205 #define FOR_EACH_CPU_FPREGISTER(V) \
222 FOR_EACH_CPU_FPREGISTER_EXTENSION(V)
224 #if CPU(APPLE_ARMV7S)
225 #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) \
243 #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) // Nothing to add.
244 #endif // CPU(APPLE_ARMV7S)
246 #endif // USE(MASM_PROBE)
249 class ARMv7Assembler
;
250 class ARMThumbImmediate
{
251 friend class ARMv7Assembler
;
253 typedef uint8_t ThumbImmediateType
;
254 static const ThumbImmediateType TypeInvalid
= 0;
255 static const ThumbImmediateType TypeEncoded
= 1;
256 static const ThumbImmediateType TypeUInt16
= 2;
266 // If this is an encoded immediate, then it may describe a shift, or a pattern.
268 unsigned shiftValue7
: 7;
269 unsigned shiftAmount
: 5;
272 unsigned immediate
: 8;
273 unsigned pattern
: 4;
275 } ThumbImmediateValue
;
277 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
// One binary-search step of a leading-zero count. 'value' is treated as a
// 2N-bit quantity: if any of its top N bits are set, shift them down into
// the low half so the next (smaller) step can examine them; otherwise the
// top N bits are all zero, so credit N to the running zero count.
// In-out: 'value' is narrowed, 'zeros' accumulates.
static inline void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
{
    if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
        value >>= N; /* if any were set, lose the bottom N */
    else /* if none of the top N bits are set, */
        zeros += N; /* then we have identified N leading zeros */
}
296 static int32_t countLeadingZeros(uint32_t value
)
302 countLeadingZerosPartial(value
, zeros
, 16);
303 countLeadingZerosPartial(value
, zeros
, 8);
304 countLeadingZerosPartial(value
, zeros
, 4);
305 countLeadingZerosPartial(value
, zeros
, 2);
306 countLeadingZerosPartial(value
, zeros
, 1);
311 : m_type(TypeInvalid
)
316 ARMThumbImmediate(ThumbImmediateType type
, ThumbImmediateValue value
)
322 ARMThumbImmediate(ThumbImmediateType type
, uint16_t value
)
325 // Make sure this constructor is only reached with type TypeUInt16;
326 // this extra parameter makes the code a little clearer by making it
327 // explicit at call sites which type is being constructed
328 ASSERT_UNUSED(type
, type
== TypeUInt16
);
330 m_value
.asInt
= value
;
334 static ARMThumbImmediate
makeEncodedImm(uint32_t value
)
336 ThumbImmediateValue encoding
;
339 // okay, these are easy.
341 encoding
.immediate
= value
;
342 encoding
.pattern
= 0;
343 return ARMThumbImmediate(TypeEncoded
, encoding
);
346 int32_t leadingZeros
= countLeadingZeros(value
);
347 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
348 ASSERT(leadingZeros
< 24);
350 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
351 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
352 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
353 int32_t rightShiftAmount
= 24 - leadingZeros
;
354 if (value
== ((value
>> rightShiftAmount
) << rightShiftAmount
)) {
355 // Shift the value down to the low byte position. The assign to
356 // shiftValue7 drops the implicit top bit.
357 encoding
.shiftValue7
= value
>> rightShiftAmount
;
358 // The endoded shift amount is the magnitude of a right rotate.
359 encoding
.shiftAmount
= 8 + leadingZeros
;
360 return ARMThumbImmediate(TypeEncoded
, encoding
);
366 if ((bytes
.byte0
== bytes
.byte1
) && (bytes
.byte0
== bytes
.byte2
) && (bytes
.byte0
== bytes
.byte3
)) {
367 encoding
.immediate
= bytes
.byte0
;
368 encoding
.pattern
= 3;
369 return ARMThumbImmediate(TypeEncoded
, encoding
);
372 if ((bytes
.byte0
== bytes
.byte2
) && !(bytes
.byte1
| bytes
.byte3
)) {
373 encoding
.immediate
= bytes
.byte0
;
374 encoding
.pattern
= 1;
375 return ARMThumbImmediate(TypeEncoded
, encoding
);
378 if ((bytes
.byte1
== bytes
.byte3
) && !(bytes
.byte0
| bytes
.byte2
)) {
379 encoding
.immediate
= bytes
.byte1
;
380 encoding
.pattern
= 2;
381 return ARMThumbImmediate(TypeEncoded
, encoding
);
384 return ARMThumbImmediate();
387 static ARMThumbImmediate
makeUInt12(int32_t value
)
389 return (!(value
& 0xfffff000))
390 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
391 : ARMThumbImmediate();
394 static ARMThumbImmediate
makeUInt12OrEncodedImm(int32_t value
)
396 // If this is not a 12-bit unsigned it, try making an encoded immediate.
397 return (!(value
& 0xfffff000))
398 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
399 : makeEncodedImm(value
);
402 // The 'make' methods, above, return a !isValid() value if the argument
403 // cannot be represented as the requested type. This methods is called
404 // 'get' since the argument can always be represented.
405 static ARMThumbImmediate
makeUInt16(uint16_t value
)
407 return ARMThumbImmediate(TypeUInt16
, value
);
412 return m_type
!= TypeInvalid
;
415 uint16_t asUInt16() const { return m_value
.asInt
; }
417 // These methods rely on the format of encoded byte values.
418 bool isUInt3() { return !(m_value
.asInt
& 0xfff8); }
419 bool isUInt4() { return !(m_value
.asInt
& 0xfff0); }
420 bool isUInt5() { return !(m_value
.asInt
& 0xffe0); }
421 bool isUInt6() { return !(m_value
.asInt
& 0xffc0); }
422 bool isUInt7() { return !(m_value
.asInt
& 0xff80); }
423 bool isUInt8() { return !(m_value
.asInt
& 0xff00); }
424 bool isUInt9() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfe00); }
425 bool isUInt10() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfc00); }
426 bool isUInt12() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xf000); }
427 bool isUInt16() { return m_type
== TypeUInt16
; }
428 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value
.asInt
; }
429 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value
.asInt
; }
430 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value
.asInt
; }
431 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value
.asInt
; }
432 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value
.asInt
; }
433 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value
.asInt
; }
434 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value
.asInt
; }
435 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value
.asInt
; }
436 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value
.asInt
; }
437 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value
.asInt
; }
439 bool isEncodedImm() { return m_type
== TypeEncoded
; }
442 ThumbImmediateType m_type
;
443 ThumbImmediateValue m_value
;
452 SRType_RRX
= SRType_ROR
455 class ShiftTypeAndAmount
{
456 friend class ARMv7Assembler
;
461 m_u
.type
= (ARMShiftType
)0;
465 ShiftTypeAndAmount(ARMShiftType type
, unsigned amount
)
468 m_u
.amount
= amount
& 31;
471 unsigned lo4() { return m_u
.lo4
; }
472 unsigned hi4() { return m_u
.hi4
; }
487 class ARMv7Assembler
{
489 typedef ARMRegisters::RegisterID RegisterID
;
490 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID
;
491 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID
;
492 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID
;
493 typedef FPDoubleRegisterID FPRegisterID
;
495 static RegisterID
firstRegister() { return ARMRegisters::r0
; }
496 static RegisterID
lastRegister() { return ARMRegisters::r13
; }
498 static FPRegisterID
firstFPRegister() { return ARMRegisters::d0
; }
499 static FPRegisterID
lastFPRegister() { return ARMRegisters::d31
; }
501 // (HS, LO, HI, LS) -> (AE, B, A, BE)
502 // (VS, VC) -> (O, NO)
504 ConditionEQ
, // Zero / Equal.
505 ConditionNE
, // Non-zero / Not equal.
506 ConditionHS
, ConditionCS
= ConditionHS
, // Unsigned higher or same.
507 ConditionLO
, ConditionCC
= ConditionLO
, // Unsigned lower.
508 ConditionMI
, // Negative.
509 ConditionPL
, // Positive or zero.
510 ConditionVS
, // Overflowed.
511 ConditionVC
, // Not overflowed.
512 ConditionHI
, // Unsigned higher.
513 ConditionLS
, // Unsigned lower or same.
514 ConditionGE
, // Signed greater than or equal.
515 ConditionLT
, // Signed less than.
516 ConditionGT
, // Signed greater than.
517 ConditionLE
, // Signed less than or equal.
518 ConditionAL
, // Unconditional / Always execute.
522 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
523 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
524 enum JumpType
{ JumpFixed
= JUMP_ENUM_WITH_SIZE(0, 0),
525 JumpNoCondition
= JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
526 JumpCondition
= JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
527 JumpNoConditionFixedSize
= JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
528 JumpConditionFixedSize
= JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
531 LinkInvalid
= JUMP_ENUM_WITH_SIZE(0, 0),
532 LinkJumpT1
= JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
533 LinkJumpT2
= JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
534 LinkJumpT3
= JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
535 LinkJumpT4
= JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
536 LinkConditionalJumpT4
= JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
537 LinkBX
= JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
538 LinkConditionalBX
= JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
543 LinkRecord(intptr_t from
, intptr_t to
, JumpType type
, Condition condition
)
545 data
.realTypes
.m_from
= from
;
546 data
.realTypes
.m_to
= to
;
547 data
.realTypes
.m_type
= type
;
548 data
.realTypes
.m_linkType
= LinkInvalid
;
549 data
.realTypes
.m_condition
= condition
;
551 void operator=(const LinkRecord
& other
)
553 data
.copyTypes
.content
[0] = other
.data
.copyTypes
.content
[0];
554 data
.copyTypes
.content
[1] = other
.data
.copyTypes
.content
[1];
555 data
.copyTypes
.content
[2] = other
.data
.copyTypes
.content
[2];
557 intptr_t from() const { return data
.realTypes
.m_from
; }
558 void setFrom(intptr_t from
) { data
.realTypes
.m_from
= from
; }
559 intptr_t to() const { return data
.realTypes
.m_to
; }
560 JumpType
type() const { return data
.realTypes
.m_type
; }
561 JumpLinkType
linkType() const { return data
.realTypes
.m_linkType
; }
562 void setLinkType(JumpLinkType linkType
) { ASSERT(data
.realTypes
.m_linkType
== LinkInvalid
); data
.realTypes
.m_linkType
= linkType
; }
563 Condition
condition() const { return data
.realTypes
.m_condition
; }
567 intptr_t m_from
: 31;
570 JumpLinkType m_linkType
: 8;
571 Condition m_condition
: 16;
576 COMPILE_ASSERT(sizeof(RealTypes
) == sizeof(CopyTypes
), LinkRecordCopyStructSizeEqualsRealStruct
);
581 : m_indexOfLastWatchpoint(INT_MIN
)
582 , m_indexOfTailOfLastWatchpoint(INT_MIN
)
586 AssemblerBuffer
& buffer() { return m_formatter
.m_buffer
; }
591 static bool BadReg(RegisterID reg
)
593 return (reg
== ARMRegisters::sp
) || (reg
== ARMRegisters::pc
);
596 uint32_t singleRegisterMask(FPSingleRegisterID rdNum
, int highBitsShift
, int lowBitShift
)
598 uint32_t rdMask
= (rdNum
>> 1) << highBitsShift
;
600 rdMask
|= 1 << lowBitShift
;
604 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum
, int highBitShift
, int lowBitsShift
)
606 uint32_t rdMask
= (rdNum
& 0xf) << lowBitsShift
;
608 rdMask
|= 1 << highBitShift
;
613 OP_ADD_reg_T1
= 0x1800,
614 OP_SUB_reg_T1
= 0x1A00,
615 OP_ADD_imm_T1
= 0x1C00,
616 OP_SUB_imm_T1
= 0x1E00,
617 OP_MOV_imm_T1
= 0x2000,
618 OP_CMP_imm_T1
= 0x2800,
619 OP_ADD_imm_T2
= 0x3000,
620 OP_SUB_imm_T2
= 0x3800,
621 OP_AND_reg_T1
= 0x4000,
622 OP_EOR_reg_T1
= 0x4040,
623 OP_TST_reg_T1
= 0x4200,
624 OP_RSB_imm_T1
= 0x4240,
625 OP_CMP_reg_T1
= 0x4280,
626 OP_ORR_reg_T1
= 0x4300,
627 OP_MVN_reg_T1
= 0x43C0,
628 OP_ADD_reg_T2
= 0x4400,
629 OP_MOV_reg_T1
= 0x4600,
632 OP_STR_reg_T1
= 0x5000,
633 OP_STRH_reg_T1
= 0x5200,
634 OP_STRB_reg_T1
= 0x5400,
635 OP_LDRSB_reg_T1
= 0x5600,
636 OP_LDR_reg_T1
= 0x5800,
637 OP_LDRH_reg_T1
= 0x5A00,
638 OP_LDRB_reg_T1
= 0x5C00,
639 OP_LDRSH_reg_T1
= 0x5E00,
640 OP_STR_imm_T1
= 0x6000,
641 OP_LDR_imm_T1
= 0x6800,
642 OP_STRB_imm_T1
= 0x7000,
643 OP_LDRB_imm_T1
= 0x7800,
644 OP_STRH_imm_T1
= 0x8000,
645 OP_LDRH_imm_T1
= 0x8800,
646 OP_STR_imm_T2
= 0x9000,
647 OP_LDR_imm_T2
= 0x9800,
648 OP_ADD_SP_imm_T1
= 0xA800,
649 OP_ADD_SP_imm_T2
= 0xB000,
650 OP_SUB_SP_imm_T1
= 0xB080,
663 OP_AND_reg_T2
= 0xEA00,
664 OP_TST_reg_T2
= 0xEA10,
665 OP_ORR_reg_T2
= 0xEA40,
666 OP_ORR_S_reg_T2
= 0xEA50,
667 OP_ASR_imm_T1
= 0xEA4F,
668 OP_LSL_imm_T1
= 0xEA4F,
669 OP_LSR_imm_T1
= 0xEA4F,
670 OP_ROR_imm_T1
= 0xEA4F,
671 OP_MVN_reg_T2
= 0xEA6F,
672 OP_EOR_reg_T2
= 0xEA80,
673 OP_ADD_reg_T3
= 0xEB00,
674 OP_ADD_S_reg_T3
= 0xEB10,
675 OP_SUB_reg_T2
= 0xEBA0,
676 OP_SUB_S_reg_T2
= 0xEBB0,
677 OP_CMP_reg_T2
= 0xEBB0,
678 OP_VMOV_CtoD
= 0xEC00,
679 OP_VMOV_DtoC
= 0xEC10,
684 OP_VMOV_CtoS
= 0xEE00,
685 OP_VMOV_StoC
= 0xEE10,
692 OP_VCVT_FPIVFP
= 0xEEB0,
694 OP_VMOV_IMM_T2
= 0xEEB0,
697 OP_VSQRT_T1
= 0xEEB0,
698 OP_VCVTSD_T1
= 0xEEB0,
699 OP_VCVTDS_T1
= 0xEEB0,
702 OP_AND_imm_T1
= 0xF000,
704 OP_ORR_imm_T1
= 0xF040,
705 OP_MOV_imm_T2
= 0xF040,
707 OP_EOR_imm_T1
= 0xF080,
708 OP_ADD_imm_T3
= 0xF100,
709 OP_ADD_S_imm_T3
= 0xF110,
712 OP_SUB_imm_T3
= 0xF1A0,
713 OP_SUB_S_imm_T3
= 0xF1B0,
714 OP_CMP_imm_T2
= 0xF1B0,
715 OP_RSB_imm_T2
= 0xF1C0,
716 OP_RSB_S_imm_T2
= 0xF1D0,
717 OP_ADD_imm_T4
= 0xF200,
718 OP_MOV_imm_T3
= 0xF240,
719 OP_SUB_imm_T4
= 0xF2A0,
723 OP_DMB_SY_T2a
= 0xF3BF,
724 OP_STRB_imm_T3
= 0xF800,
725 OP_STRB_reg_T2
= 0xF800,
726 OP_LDRB_imm_T3
= 0xF810,
727 OP_LDRB_reg_T2
= 0xF810,
728 OP_STRH_imm_T3
= 0xF820,
729 OP_STRH_reg_T2
= 0xF820,
730 OP_LDRH_reg_T2
= 0xF830,
731 OP_LDRH_imm_T3
= 0xF830,
732 OP_STR_imm_T4
= 0xF840,
733 OP_STR_reg_T2
= 0xF840,
734 OP_LDR_imm_T4
= 0xF850,
735 OP_LDR_reg_T2
= 0xF850,
736 OP_STRB_imm_T2
= 0xF880,
737 OP_LDRB_imm_T2
= 0xF890,
738 OP_STRH_imm_T2
= 0xF8A0,
739 OP_LDRH_imm_T2
= 0xF8B0,
740 OP_STR_imm_T3
= 0xF8C0,
741 OP_LDR_imm_T3
= 0xF8D0,
742 OP_LDRSB_reg_T2
= 0xF910,
743 OP_LDRSH_reg_T2
= 0xF930,
744 OP_LSL_reg_T2
= 0xFA00,
745 OP_LSR_reg_T2
= 0xFA20,
746 OP_ASR_reg_T2
= 0xFA40,
747 OP_ROR_reg_T2
= 0xFA60,
749 OP_SMULL_T1
= 0xFB80,
750 #if CPU(APPLE_ARMV7S)
757 OP_VADD_T2b
= 0x0A00,
761 OP_VMOV_IMM_T2b
= 0x0A00,
762 OP_VMOV_T2b
= 0x0A40,
763 OP_VMUL_T2b
= 0x0A00,
766 OP_VMOV_StoCb
= 0x0A10,
767 OP_VMOV_CtoSb
= 0x0A10,
768 OP_VMOV_DtoCb
= 0x0A10,
769 OP_VMOV_CtoDb
= 0x0A10,
771 OP_VABS_T2b
= 0x0A40,
773 OP_VCVT_FPIVFPb
= 0x0A40,
774 OP_VNEG_T2b
= 0x0A40,
775 OP_VSUB_T2b
= 0x0A40,
776 OP_VSQRT_T1b
= 0x0A40,
777 OP_VCVTSD_T1b
= 0x0A40,
778 OP_VCVTDS_T1b
= 0x0A40,
780 OP_DMB_SY_T2b
= 0x8F5F,
786 FourFours(unsigned f3
, unsigned f2
, unsigned f1
, unsigned f0
)
805 class ARMInstructionFormatter
;
808 static bool ifThenElseConditionBit(Condition condition
, bool isIf
)
810 return isIf
? (condition
& 1) : !(condition
& 1);
812 static uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
, bool inst4if
)
814 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
815 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
816 | (ifThenElseConditionBit(condition
, inst4if
) << 1)
818 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
819 return (condition
<< 4) | mask
;
821 static uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
)
823 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
824 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
826 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
827 return (condition
<< 4) | mask
;
829 static uint8_t ifThenElse(Condition condition
, bool inst2if
)
831 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
833 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
834 return (condition
<< 4) | mask
;
837 static uint8_t ifThenElse(Condition condition
)
840 return (condition
<< 4) | mask
;
845 void adc(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
847 // Rd can only be SP if Rn is also SP.
848 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
849 ASSERT(rd
!= ARMRegisters::pc
);
850 ASSERT(rn
!= ARMRegisters::pc
);
851 ASSERT(imm
.isEncodedImm());
853 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm
, rn
, rd
, imm
);
856 void add(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
858 // Rd can only be SP if Rn is also SP.
859 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
860 ASSERT(rd
!= ARMRegisters::pc
);
861 ASSERT(rn
!= ARMRegisters::pc
);
862 ASSERT(imm
.isValid());
864 if (rn
== ARMRegisters::sp
&& imm
.isUInt16()) {
865 ASSERT(!(imm
.getUInt16() & 3));
866 if (!(rd
& 8) && imm
.isUInt10()) {
867 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1
, rd
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
869 } else if ((rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
870 m_formatter
.oneWordOp9Imm7(OP_ADD_SP_imm_T2
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
873 } else if (!((rd
| rn
) & 8)) {
875 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
877 } else if ((rd
== rn
) && imm
.isUInt8()) {
878 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
883 if (imm
.isEncodedImm())
884 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3
, rn
, rd
, imm
);
886 ASSERT(imm
.isUInt12());
887 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4
, rn
, rd
, imm
);
891 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
893 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
894 ASSERT(rd
!= ARMRegisters::pc
);
895 ASSERT(rn
!= ARMRegisters::pc
);
897 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
900 // NOTE: In an IT block, add doesn't modify the flags register.
901 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
)
903 if (rd
== ARMRegisters::sp
) {
909 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rm
, rd
);
911 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rn
, rd
);
912 else if (!((rd
| rn
| rm
) & 8))
913 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
915 add(rd
, rn
, rm
, ShiftTypeAndAmount());
918 // Not allowed in an IT (if then) block.
919 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
921 // Rd can only be SP if Rn is also SP.
922 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
923 ASSERT(rd
!= ARMRegisters::pc
);
924 ASSERT(rn
!= ARMRegisters::pc
);
925 ASSERT(imm
.isEncodedImm());
927 if (!((rd
| rn
) & 8)) {
929 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
931 } else if ((rd
== rn
) && imm
.isUInt8()) {
932 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
937 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3
, rn
, rd
, imm
);
940 // Not allowed in an IT (if then) block?
941 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
943 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
944 ASSERT(rd
!= ARMRegisters::pc
);
945 ASSERT(rn
!= ARMRegisters::pc
);
947 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
950 // Not allowed in an IT (if then) block.
951 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
953 if (!((rd
| rn
| rm
) & 8))
954 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
956 add_S(rd
, rn
, rm
, ShiftTypeAndAmount());
959 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
963 ASSERT(imm
.isEncodedImm());
964 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1
, rn
, rd
, imm
);
967 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
972 m_formatter
.twoWordOp12Reg4FourFours(OP_AND_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
975 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
)
977 if ((rd
== rn
) && !((rd
| rm
) & 8))
978 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rm
, rd
);
979 else if ((rd
== rm
) && !((rd
| rn
) & 8))
980 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rn
, rd
);
982 ARM_and(rd
, rn
, rm
, ShiftTypeAndAmount());
985 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
989 ShiftTypeAndAmount
shift(SRType_ASR
, shiftAmount
);
990 m_formatter
.twoWordOp16FourFours(OP_ASR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
993 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
998 m_formatter
.twoWordOp12Reg4FourFours(OP_ASR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1001 // Only allowed in IT (if then) block if last instruction.
1002 ALWAYS_INLINE AssemblerLabel
b()
1004 m_formatter
.twoWordOp16Op16(OP_B_T4a
, OP_B_T4b
);
1005 return m_formatter
.label();
1008 // Only allowed in IT (if then) block if last instruction.
1009 ALWAYS_INLINE AssemblerLabel
blx(RegisterID rm
)
1011 ASSERT(rm
!= ARMRegisters::pc
);
1012 m_formatter
.oneWordOp8RegReg143(OP_BLX
, rm
, (RegisterID
)8);
1013 return m_formatter
.label();
1016 // Only allowed in IT (if then) block if last instruction.
1017 ALWAYS_INLINE AssemblerLabel
bx(RegisterID rm
)
1019 m_formatter
.oneWordOp8RegReg143(OP_BX
, rm
, (RegisterID
)0);
1020 return m_formatter
.label();
1023 void bkpt(uint8_t imm
= 0)
1025 m_formatter
.oneWordOp8Imm8(OP_BKPT
, imm
);
1028 ALWAYS_INLINE
void clz(RegisterID rd
, RegisterID rm
)
1030 ASSERT(!BadReg(rd
));
1031 ASSERT(!BadReg(rm
));
1032 m_formatter
.twoWordOp12Reg4FourFours(OP_CLZ
, rm
, FourFours(0xf, rd
, 8, rm
));
1035 ALWAYS_INLINE
void cmn(RegisterID rn
, ARMThumbImmediate imm
)
1037 ASSERT(rn
!= ARMRegisters::pc
);
1038 ASSERT(imm
.isEncodedImm());
1040 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm
, rn
, (RegisterID
)0xf, imm
);
1043 ALWAYS_INLINE
void cmp(RegisterID rn
, ARMThumbImmediate imm
)
1045 ASSERT(rn
!= ARMRegisters::pc
);
1046 ASSERT(imm
.isEncodedImm());
1048 if (!(rn
& 8) && imm
.isUInt8())
1049 m_formatter
.oneWordOp5Reg3Imm8(OP_CMP_imm_T1
, rn
, imm
.getUInt8());
1051 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2
, rn
, (RegisterID
)0xf, imm
);
1054 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1056 ASSERT(rn
!= ARMRegisters::pc
);
1057 ASSERT(!BadReg(rm
));
1058 m_formatter
.twoWordOp12Reg4FourFours(OP_CMP_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
1061 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
)
1064 cmp(rn
, rm
, ShiftTypeAndAmount());
1066 m_formatter
.oneWordOp10Reg3Reg3(OP_CMP_reg_T1
, rm
, rn
);
1069 // xor is not spelled with an 'e'. :-(
1070 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1072 ASSERT(!BadReg(rd
));
1073 ASSERT(!BadReg(rn
));
1074 ASSERT(imm
.isEncodedImm());
1075 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1
, rn
, rd
, imm
);
1078 // xor is not spelled with an 'e'. :-(
1079 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1081 ASSERT(!BadReg(rd
));
1082 ASSERT(!BadReg(rn
));
1083 ASSERT(!BadReg(rm
));
1084 m_formatter
.twoWordOp12Reg4FourFours(OP_EOR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1087 // xor is not spelled with an 'e'. :-(
1088 void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1090 if ((rd
== rn
) && !((rd
| rm
) & 8))
1091 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rm
, rd
);
1092 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1093 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rn
, rd
);
1095 eor(rd
, rn
, rm
, ShiftTypeAndAmount());
1098 ALWAYS_INLINE
void it(Condition cond
)
1100 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
));
1103 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
)
1105 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
));
1108 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
)
1110 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
));
1113 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
, bool inst4if
)
1115 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
, inst4if
));
1118 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1119 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1121 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1122 ASSERT(imm
.isUInt12());
1124 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1125 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1126 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1127 m_formatter
.oneWordOp5Reg3Imm8(OP_LDR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1129 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, imm
.getUInt12());
1132 ALWAYS_INLINE
void ldrWide8BitImmediate(RegisterID rt
, RegisterID rn
, uint8_t immediate
)
1134 ASSERT(rn
!= ARMRegisters::pc
);
1135 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, immediate
);
1138 ALWAYS_INLINE
void ldrCompact(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1140 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1141 ASSERT(imm
.isUInt7());
1142 ASSERT(!((rt
| rn
) & 8));
1143 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1146 // If index is set, this is a regular offset or a pre-indexed load;
1147 // if index is not set then is is a post-index load.
1149 // If wback is set rn is updated - this is a pre or post index load,
1150 // if wback is not set this is a regular offset memory access.
1152 // (-255 <= offset <= 255)
1154 // _tmp = _reg + offset
1155 // MEM[index ? _tmp : _reg] = REG[rt]
1156 // if (wback) REG[rn] = _tmp
1157 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1159 ASSERT(rt
!= ARMRegisters::pc
);
1160 ASSERT(rn
!= ARMRegisters::pc
);
1161 ASSERT(index
|| wback
);
1162 ASSERT(!wback
| (rt
!= rn
));
1169 ASSERT((offset
& ~0xff) == 0);
1171 offset
|= (wback
<< 8);
1172 offset
|= (add
<< 9);
1173 offset
|= (index
<< 10);
1174 offset
|= (1 << 11);
1176 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4
, rn
, rt
, offset
);
1179 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1180 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1182 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1183 ASSERT(!BadReg(rm
));
1186 if (!shift
&& !((rt
| rn
| rm
) & 8))
1187 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1
, rm
, rn
, rt
);
1189 m_formatter
.twoWordOp12Reg4FourFours(OP_LDR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1192 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1193 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1195 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1196 ASSERT(imm
.isUInt12());
1198 if (!((rt
| rn
) & 8) && imm
.isUInt6())
1199 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1
, imm
.getUInt6() >> 2, rn
, rt
);
1201 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1204 // If index is set, this is a regular offset or a pre-indexed load;
1205 // if index is not set then is is a post-index load.
1207 // If wback is set rn is updated - this is a pre or post index load,
1208 // if wback is not set this is a regular offset memory access.
1210 // (-255 <= offset <= 255)
1212 // _tmp = _reg + offset
1213 // MEM[index ? _tmp : _reg] = REG[rt]
1214 // if (wback) REG[rn] = _tmp
1215 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1217 ASSERT(rt
!= ARMRegisters::pc
);
1218 ASSERT(rn
!= ARMRegisters::pc
);
1219 ASSERT(index
|| wback
);
1220 ASSERT(!wback
| (rt
!= rn
));
1227 ASSERT((offset
& ~0xff) == 0);
1229 offset
|= (wback
<< 8);
1230 offset
|= (add
<< 9);
1231 offset
|= (index
<< 10);
1232 offset
|= (1 << 11);
1234 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3
, rn
, rt
, offset
);
1237 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1239 ASSERT(!BadReg(rt
)); // Memory hint
1240 ASSERT(rn
!= ARMRegisters::pc
); // LDRH (literal)
1241 ASSERT(!BadReg(rm
));
1244 if (!shift
&& !((rt
| rn
| rm
) & 8))
1245 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1
, rm
, rn
, rt
);
1247 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1250 void ldrb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1252 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1253 ASSERT(imm
.isUInt12());
1255 if (!((rt
| rn
) & 8) && imm
.isUInt5())
1256 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1
, imm
.getUInt5(), rn
, rt
);
1258 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1261 void ldrb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1263 ASSERT(rt
!= ARMRegisters::pc
);
1264 ASSERT(rn
!= ARMRegisters::pc
);
1265 ASSERT(index
|| wback
);
1266 ASSERT(!wback
| (rt
!= rn
));
1274 ASSERT(!(offset
& ~0xff));
1276 offset
|= (wback
<< 8);
1277 offset
|= (add
<< 9);
1278 offset
|= (index
<< 10);
1279 offset
|= (1 << 11);
1281 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3
, rn
, rt
, offset
);
1284 ALWAYS_INLINE
void ldrb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1286 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1287 ASSERT(!BadReg(rm
));
1290 if (!shift
&& !((rt
| rn
| rm
) & 8))
1291 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1
, rm
, rn
, rt
);
1293 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1296 void ldrsb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1298 ASSERT(rn
!= ARMRegisters::pc
);
1299 ASSERT(!BadReg(rm
));
1302 if (!shift
&& !((rt
| rn
| rm
) & 8))
1303 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1
, rm
, rn
, rt
);
1305 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1308 void ldrsh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1310 ASSERT(rn
!= ARMRegisters::pc
);
1311 ASSERT(!BadReg(rm
));
1314 if (!shift
&& !((rt
| rn
| rm
) & 8))
1315 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1
, rm
, rn
, rt
);
1317 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1320 void lsl(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1322 ASSERT(!BadReg(rd
));
1323 ASSERT(!BadReg(rm
));
1324 ShiftTypeAndAmount
shift(SRType_LSL
, shiftAmount
);
1325 m_formatter
.twoWordOp16FourFours(OP_LSL_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1328 ALWAYS_INLINE
void lsl(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1330 ASSERT(!BadReg(rd
));
1331 ASSERT(!BadReg(rn
));
1332 ASSERT(!BadReg(rm
));
1333 m_formatter
.twoWordOp12Reg4FourFours(OP_LSL_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1336 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1338 ASSERT(!BadReg(rd
));
1339 ASSERT(!BadReg(rm
));
1340 ShiftTypeAndAmount
shift(SRType_LSR
, shiftAmount
);
1341 m_formatter
.twoWordOp16FourFours(OP_LSR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1344 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1346 ASSERT(!BadReg(rd
));
1347 ASSERT(!BadReg(rn
));
1348 ASSERT(!BadReg(rm
));
1349 m_formatter
.twoWordOp12Reg4FourFours(OP_LSR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1352 ALWAYS_INLINE
void movT3(RegisterID rd
, ARMThumbImmediate imm
)
1354 ASSERT(imm
.isValid());
1355 ASSERT(!imm
.isEncodedImm());
1356 ASSERT(!BadReg(rd
));
1358 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3
, imm
.m_value
.imm4
, rd
, imm
);
1362 static void revertJumpTo_movT3movtcmpT2(void* instructionStart
, RegisterID left
, RegisterID right
, uintptr_t imm
)
1364 uint16_t* address
= static_cast<uint16_t*>(instructionStart
);
1365 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm
));
1366 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm
>> 16));
1367 address
[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
1368 address
[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right
, lo16
);
1369 address
[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
1370 address
[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right
, hi16
);
1371 address
[4] = OP_CMP_reg_T2
| left
;
1372 cacheFlush(address
, sizeof(uint16_t) * 5);
1375 static void revertJumpTo_movT3(void* instructionStart
, RegisterID rd
, ARMThumbImmediate imm
)
1377 ASSERT(imm
.isValid());
1378 ASSERT(!imm
.isEncodedImm());
1379 ASSERT(!BadReg(rd
));
1381 uint16_t* address
= static_cast<uint16_t*>(instructionStart
);
1382 address
[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, imm
);
1383 address
[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, imm
);
1384 cacheFlush(address
, sizeof(uint16_t) * 2);
1388 ALWAYS_INLINE
void mov(RegisterID rd
, ARMThumbImmediate imm
)
1390 ASSERT(imm
.isValid());
1391 ASSERT(!BadReg(rd
));
1393 if ((rd
< 8) && imm
.isUInt8())
1394 m_formatter
.oneWordOp5Reg3Imm8(OP_MOV_imm_T1
, rd
, imm
.getUInt8());
1395 else if (imm
.isEncodedImm())
1396 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2
, 0xf, rd
, imm
);
1401 ALWAYS_INLINE
void mov(RegisterID rd
, RegisterID rm
)
1403 m_formatter
.oneWordOp8RegReg143(OP_MOV_reg_T1
, rm
, rd
);
1406 ALWAYS_INLINE
void movt(RegisterID rd
, ARMThumbImmediate imm
)
1408 ASSERT(imm
.isUInt16());
1409 ASSERT(!BadReg(rd
));
1410 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT
, imm
.m_value
.imm4
, rd
, imm
);
1413 ALWAYS_INLINE
void mvn(RegisterID rd
, ARMThumbImmediate imm
)
1415 ASSERT(imm
.isEncodedImm());
1416 ASSERT(!BadReg(rd
));
1418 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm
, 0xf, rd
, imm
);
1421 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
, ShiftTypeAndAmount shift
)
1423 ASSERT(!BadReg(rd
));
1424 ASSERT(!BadReg(rm
));
1425 m_formatter
.twoWordOp16FourFours(OP_MVN_reg_T2
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1428 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
)
1430 if (!((rd
| rm
) & 8))
1431 m_formatter
.oneWordOp10Reg3Reg3(OP_MVN_reg_T1
, rm
, rd
);
1433 mvn(rd
, rm
, ShiftTypeAndAmount());
1436 ALWAYS_INLINE
void neg(RegisterID rd
, RegisterID rm
)
1438 ARMThumbImmediate zero
= ARMThumbImmediate::makeUInt12(0);
1442 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1444 ASSERT(!BadReg(rd
));
1445 ASSERT(!BadReg(rn
));
1446 ASSERT(imm
.isEncodedImm());
1447 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1
, rn
, rd
, imm
);
1450 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1452 ASSERT(!BadReg(rd
));
1453 ASSERT(!BadReg(rn
));
1454 ASSERT(!BadReg(rm
));
1455 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1458 void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1460 if ((rd
== rn
) && !((rd
| rm
) & 8))
1461 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1462 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1463 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1465 orr(rd
, rn
, rm
, ShiftTypeAndAmount());
1468 ALWAYS_INLINE
void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1470 ASSERT(!BadReg(rd
));
1471 ASSERT(!BadReg(rn
));
1472 ASSERT(!BadReg(rm
));
1473 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1476 void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1478 if ((rd
== rn
) && !((rd
| rm
) & 8))
1479 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1480 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1481 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1483 orr_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1486 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1488 ASSERT(!BadReg(rd
));
1489 ASSERT(!BadReg(rm
));
1490 ShiftTypeAndAmount
shift(SRType_ROR
, shiftAmount
);
1491 m_formatter
.twoWordOp16FourFours(OP_ROR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1494 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1496 ASSERT(!BadReg(rd
));
1497 ASSERT(!BadReg(rn
));
1498 ASSERT(!BadReg(rm
));
1499 m_formatter
.twoWordOp12Reg4FourFours(OP_ROR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1502 ALWAYS_INLINE
void pop(RegisterID dest
)
1504 if (dest
< ARMRegisters::r8
)
1505 m_formatter
.oneWordOp7Imm9(OP_POP_T1
, 1 << dest
);
1507 // Load postindexed with writeback.
1508 ldr(dest
, ARMRegisters::sp
, sizeof(void*), false, true);
1512 ALWAYS_INLINE
void pop(uint32_t registerList
)
1514 ASSERT(WTF::bitCount(registerList
) > 1);
1515 ASSERT(!((1 << ARMRegisters::pc
) & registerList
) || !((1 << ARMRegisters::lr
) & registerList
));
1516 ASSERT(!((1 << ARMRegisters::sp
) & registerList
));
1517 m_formatter
.twoWordOp16Imm16(OP_POP_T2
, registerList
);
1520 ALWAYS_INLINE
void push(RegisterID src
)
1522 if (src
< ARMRegisters::r8
)
1523 m_formatter
.oneWordOp7Imm9(OP_PUSH_T1
, 1 << src
);
1524 else if (src
== ARMRegisters::lr
)
1525 m_formatter
.oneWordOp7Imm9(OP_PUSH_T1
, 0x100);
1527 // Store preindexed with writeback.
1528 str(src
, ARMRegisters::sp
, -sizeof(void*), true, true);
1532 ALWAYS_INLINE
void push(uint32_t registerList
)
1534 ASSERT(WTF::bitCount(registerList
) > 1);
1535 ASSERT(!((1 << ARMRegisters::pc
) & registerList
));
1536 ASSERT(!((1 << ARMRegisters::sp
) & registerList
));
1537 m_formatter
.twoWordOp16Imm16(OP_PUSH_T2
, registerList
);
1540 #if CPU(APPLE_ARMV7S)
1541 template<int datasize
>
1542 ALWAYS_INLINE
void sdiv(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1544 static_assert(datasize
== 32, "sdiv datasize must be 32 for armv7s");
1545 ASSERT(!BadReg(rd
));
1546 ASSERT(!BadReg(rn
));
1547 ASSERT(!BadReg(rm
));
1548 m_formatter
.twoWordOp12Reg4FourFours(OP_SDIV_T1
, rn
, FourFours(0xf, rd
, 0xf, rm
));
1552 ALWAYS_INLINE
void smull(RegisterID rdLo
, RegisterID rdHi
, RegisterID rn
, RegisterID rm
)
1554 ASSERT(!BadReg(rdLo
));
1555 ASSERT(!BadReg(rdHi
));
1556 ASSERT(!BadReg(rn
));
1557 ASSERT(!BadReg(rm
));
1558 ASSERT(rdLo
!= rdHi
);
1559 m_formatter
.twoWordOp12Reg4FourFours(OP_SMULL_T1
, rn
, FourFours(rdLo
, rdHi
, 0, rm
));
1562 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1563 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1565 ASSERT(rt
!= ARMRegisters::pc
);
1566 ASSERT(rn
!= ARMRegisters::pc
);
1567 ASSERT(imm
.isUInt12());
1569 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1570 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1571 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1572 m_formatter
.oneWordOp5Reg3Imm8(OP_STR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1574 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3
, rn
, rt
, imm
.getUInt12());
1577 // If index is set, this is a regular offset or a pre-indexed store;
1578 // if index is not set then is is a post-index store.
1580 // If wback is set rn is updated - this is a pre or post index store,
1581 // if wback is not set this is a regular offset memory access.
1583 // (-255 <= offset <= 255)
1585 // _tmp = _reg + offset
1586 // MEM[index ? _tmp : _reg] = REG[rt]
1587 // if (wback) REG[rn] = _tmp
1588 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1590 ASSERT(rt
!= ARMRegisters::pc
);
1591 ASSERT(rn
!= ARMRegisters::pc
);
1592 ASSERT(index
|| wback
);
1593 ASSERT(!wback
| (rt
!= rn
));
1600 ASSERT((offset
& ~0xff) == 0);
1602 offset
|= (wback
<< 8);
1603 offset
|= (add
<< 9);
1604 offset
|= (index
<< 10);
1605 offset
|= (1 << 11);
1607 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4
, rn
, rt
, offset
);
1610 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1611 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1613 ASSERT(rn
!= ARMRegisters::pc
);
1614 ASSERT(!BadReg(rm
));
1617 if (!shift
&& !((rt
| rn
| rm
) & 8))
1618 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1
, rm
, rn
, rt
);
1620 m_formatter
.twoWordOp12Reg4FourFours(OP_STR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1623 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1624 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1626 ASSERT(rt
!= ARMRegisters::pc
);
1627 ASSERT(rn
!= ARMRegisters::pc
);
1628 ASSERT(imm
.isUInt12());
1630 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1631 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1633 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1636 // If index is set, this is a regular offset or a pre-indexed store;
1637 // if index is not set then is is a post-index store.
1639 // If wback is set rn is updated - this is a pre or post index store,
1640 // if wback is not set this is a regular offset memory access.
1642 // (-255 <= offset <= 255)
1644 // _tmp = _reg + offset
1645 // MEM[index ? _tmp : _reg] = REG[rt]
1646 // if (wback) REG[rn] = _tmp
1647 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1649 ASSERT(rt
!= ARMRegisters::pc
);
1650 ASSERT(rn
!= ARMRegisters::pc
);
1651 ASSERT(index
|| wback
);
1652 ASSERT(!wback
| (rt
!= rn
));
1659 ASSERT((offset
& ~0xff) == 0);
1661 offset
|= (wback
<< 8);
1662 offset
|= (add
<< 9);
1663 offset
|= (index
<< 10);
1664 offset
|= (1 << 11);
1666 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3
, rn
, rt
, offset
);
1669 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1670 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1672 ASSERT(rn
!= ARMRegisters::pc
);
1673 ASSERT(!BadReg(rm
));
1676 if (!shift
&& !((rt
| rn
| rm
) & 8))
1677 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1
, rm
, rn
, rt
);
1679 m_formatter
.twoWordOp12Reg4FourFours(OP_STRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1682 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1683 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1685 ASSERT(rt
!= ARMRegisters::pc
);
1686 ASSERT(rn
!= ARMRegisters::pc
);
1687 ASSERT(imm
.isUInt12());
1689 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1690 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1692 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1695 // If index is set, this is a regular offset or a pre-indexed store;
1696 // if index is not set then is is a post-index store.
1698 // If wback is set rn is updated - this is a pre or post index store,
1699 // if wback is not set this is a regular offset memory access.
1701 // (-255 <= offset <= 255)
1703 // _tmp = _reg + offset
1704 // MEM[index ? _tmp : _reg] = REG[rt]
1705 // if (wback) REG[rn] = _tmp
1706 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1708 ASSERT(rt
!= ARMRegisters::pc
);
1709 ASSERT(rn
!= ARMRegisters::pc
);
1710 ASSERT(index
|| wback
);
1711 ASSERT(!wback
| (rt
!= rn
));
1718 ASSERT(!(offset
& ~0xff));
1720 offset
|= (wback
<< 8);
1721 offset
|= (add
<< 9);
1722 offset
|= (index
<< 10);
1723 offset
|= (1 << 11);
1725 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3
, rn
, rt
, offset
);
1728 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1729 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1731 ASSERT(rn
!= ARMRegisters::pc
);
1732 ASSERT(!BadReg(rm
));
1735 if (!shift
&& !((rt
| rn
| rm
) & 8))
1736 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1
, rm
, rn
, rt
);
1738 m_formatter
.twoWordOp12Reg4FourFours(OP_STRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1741 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1743 // Rd can only be SP if Rn is also SP.
1744 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1745 ASSERT(rd
!= ARMRegisters::pc
);
1746 ASSERT(rn
!= ARMRegisters::pc
);
1747 ASSERT(imm
.isValid());
1749 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1750 ASSERT(!(imm
.getUInt16() & 3));
1751 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1753 } else if (!((rd
| rn
) & 8)) {
1754 if (imm
.isUInt3()) {
1755 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1757 } else if ((rd
== rn
) && imm
.isUInt8()) {
1758 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1763 if (imm
.isEncodedImm())
1764 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3
, rn
, rd
, imm
);
1766 ASSERT(imm
.isUInt12());
1767 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4
, rn
, rd
, imm
);
1771 ALWAYS_INLINE
void sub(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1773 ASSERT(rd
!= ARMRegisters::pc
);
1774 ASSERT(rn
!= ARMRegisters::pc
);
1775 ASSERT(imm
.isValid());
1776 ASSERT(imm
.isUInt12());
1778 if (!((rd
| rn
) & 8) && !imm
.getUInt12())
1779 m_formatter
.oneWordOp10Reg3Reg3(OP_RSB_imm_T1
, rn
, rd
);
1781 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2
, rn
, rd
, imm
);
1784 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1786 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1787 ASSERT(rd
!= ARMRegisters::pc
);
1788 ASSERT(rn
!= ARMRegisters::pc
);
1789 ASSERT(!BadReg(rm
));
1790 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1793 // NOTE: In an IT block, add doesn't modify the flags register.
1794 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1796 if (!((rd
| rn
| rm
) & 8))
1797 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1799 sub(rd
, rn
, rm
, ShiftTypeAndAmount());
1802 // Not allowed in an IT (if then) block.
1803 void sub_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1805 // Rd can only be SP if Rn is also SP.
1806 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1807 ASSERT(rd
!= ARMRegisters::pc
);
1808 ASSERT(rn
!= ARMRegisters::pc
);
1809 ASSERT(imm
.isValid());
1811 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1812 ASSERT(!(imm
.getUInt16() & 3));
1813 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1815 } else if (!((rd
| rn
) & 8)) {
1816 if (imm
.isUInt3()) {
1817 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1819 } else if ((rd
== rn
) && imm
.isUInt8()) {
1820 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1825 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3
, rn
, rd
, imm
);
1828 ALWAYS_INLINE
void sub_S(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1830 ASSERT(rd
!= ARMRegisters::pc
);
1831 ASSERT(rn
!= ARMRegisters::pc
);
1832 ASSERT(imm
.isValid());
1833 ASSERT(imm
.isUInt12());
1835 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2
, rn
, rd
, imm
);
1838 // Not allowed in an IT (if then) block?
1839 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1841 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1842 ASSERT(rd
!= ARMRegisters::pc
);
1843 ASSERT(rn
!= ARMRegisters::pc
);
1844 ASSERT(!BadReg(rm
));
1845 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1848 // Not allowed in an IT (if then) block.
1849 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1851 if (!((rd
| rn
| rm
) & 8))
1852 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1854 sub_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1857 ALWAYS_INLINE
void tst(RegisterID rn
, ARMThumbImmediate imm
)
1859 ASSERT(!BadReg(rn
));
1860 ASSERT(imm
.isEncodedImm());
1862 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm
, rn
, (RegisterID
)0xf, imm
);
1865 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1867 ASSERT(!BadReg(rn
));
1868 ASSERT(!BadReg(rm
));
1869 m_formatter
.twoWordOp12Reg4FourFours(OP_TST_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
1872 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
)
1875 tst(rn
, rm
, ShiftTypeAndAmount());
1877 m_formatter
.oneWordOp10Reg3Reg3(OP_TST_reg_T1
, rm
, rn
);
1880 ALWAYS_INLINE
void ubfx(RegisterID rd
, RegisterID rn
, unsigned lsb
, unsigned width
)
1883 ASSERT((width
>= 1) && (width
<= 32));
1884 ASSERT((lsb
+ width
) <= 32);
1885 m_formatter
.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1
, rd
, rn
, (lsb
& 0x1c) << 10, (lsb
& 0x3) << 6, (width
- 1) & 0x1f);
1888 #if CPU(APPLE_ARMV7S)
1889 ALWAYS_INLINE
void udiv(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1891 ASSERT(!BadReg(rd
));
1892 ASSERT(!BadReg(rn
));
1893 ASSERT(!BadReg(rm
));
1894 m_formatter
.twoWordOp12Reg4FourFours(OP_UDIV_T1
, rn
, FourFours(0xf, rd
, 0xf, rm
));
1898 void vadd(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1900 m_formatter
.vfpOp(OP_VADD_T2
, OP_VADD_T2b
, true, rn
, rd
, rm
);
1903 void vcmp(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1905 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(4), rd
, rm
);
1908 void vcmpz(FPDoubleRegisterID rd
)
1910 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(5), rd
, VFPOperand(0));
1913 void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
1915 // boolean values are 64bit (toInt, unsigned, roundZero)
1916 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(false, false, false), rd
, rm
);
1919 void vcvt_floatingPointToSigned(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1921 // boolean values are 64bit (toInt, unsigned, roundZero)
1922 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, false, true), rd
, rm
);
1925 void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1927 // boolean values are 64bit (toInt, unsigned, roundZero)
1928 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, true, true), rd
, rm
);
1931 void vdiv(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1933 m_formatter
.vfpOp(OP_VDIV
, OP_VDIVb
, true, rn
, rd
, rm
);
1936 void vldr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1938 m_formatter
.vfpMemOp(OP_VLDR
, OP_VLDRb
, true, rn
, rd
, imm
);
1941 void flds(FPSingleRegisterID rd
, RegisterID rn
, int32_t imm
)
1943 m_formatter
.vfpMemOp(OP_FLDS
, OP_FLDSb
, false, rn
, rd
, imm
);
1946 void vmov(RegisterID rd
, FPSingleRegisterID rn
)
1948 ASSERT(!BadReg(rd
));
1949 m_formatter
.vfpOp(OP_VMOV_StoC
, OP_VMOV_StoCb
, false, rn
, rd
, VFPOperand(0));
1952 void vmov(FPSingleRegisterID rd
, RegisterID rn
)
1954 ASSERT(!BadReg(rn
));
1955 m_formatter
.vfpOp(OP_VMOV_CtoS
, OP_VMOV_CtoSb
, false, rd
, rn
, VFPOperand(0));
1958 void vmov(RegisterID rd1
, RegisterID rd2
, FPDoubleRegisterID rn
)
1960 ASSERT(!BadReg(rd1
));
1961 ASSERT(!BadReg(rd2
));
1962 m_formatter
.vfpOp(OP_VMOV_DtoC
, OP_VMOV_DtoCb
, true, rd2
, VFPOperand(rd1
| 16), rn
);
1965 void vmov(FPDoubleRegisterID rd
, RegisterID rn1
, RegisterID rn2
)
1967 ASSERT(!BadReg(rn1
));
1968 ASSERT(!BadReg(rn2
));
1969 m_formatter
.vfpOp(OP_VMOV_CtoD
, OP_VMOV_CtoDb
, true, rn2
, VFPOperand(rn1
| 16), rd
);
1972 void vmov(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
)
1974 m_formatter
.vfpOp(OP_VMOV_T2
, OP_VMOV_T2b
, true, VFPOperand(0), rd
, rn
);
1977 void vmrs(RegisterID reg
= ARMRegisters::pc
)
1979 ASSERT(reg
!= ARMRegisters::sp
);
1980 m_formatter
.vfpOp(OP_VMRS
, OP_VMRSb
, false, VFPOperand(1), VFPOperand(0x10 | reg
), VFPOperand(0));
1983 void vmul(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1985 m_formatter
.vfpOp(OP_VMUL_T2
, OP_VMUL_T2b
, true, rn
, rd
, rm
);
1988 void vstr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1990 m_formatter
.vfpMemOp(OP_VSTR
, OP_VSTRb
, true, rn
, rd
, imm
);
1993 void fsts(FPSingleRegisterID rd
, RegisterID rn
, int32_t imm
)
1995 m_formatter
.vfpMemOp(OP_FSTS
, OP_FSTSb
, false, rn
, rd
, imm
);
1998 void vsub(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
2000 m_formatter
.vfpOp(OP_VSUB_T2
, OP_VSUB_T2b
, true, rn
, rd
, rm
);
2003 void vabs(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
2005 m_formatter
.vfpOp(OP_VABS_T2
, OP_VABS_T2b
, true, VFPOperand(16), rd
, rm
);
2008 void vneg(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
2010 m_formatter
.vfpOp(OP_VNEG_T2
, OP_VNEG_T2b
, true, VFPOperand(1), rd
, rm
);
2013 void vsqrt(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
2015 m_formatter
.vfpOp(OP_VSQRT_T1
, OP_VSQRT_T1b
, true, VFPOperand(17), rd
, rm
);
2018 void vcvtds(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
2020 m_formatter
.vfpOp(OP_VCVTDS_T1
, OP_VCVTDS_T1b
, false, VFPOperand(23), rd
, rm
);
2023 void vcvtsd(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
2025 m_formatter
.vfpOp(OP_VCVTSD_T1
, OP_VCVTSD_T1b
, true, VFPOperand(23), rd
, rm
);
2030 m_formatter
.oneWordOp8Imm8(OP_NOP_T1
, 0);
2035 m_formatter
.twoWordOp16Op16(OP_NOP_T2a
, OP_NOP_T2b
);
2040 m_formatter
.twoWordOp16Op16(OP_DMB_SY_T2a
, OP_DMB_SY_T2b
);
2043 AssemblerLabel
labelIgnoringWatchpoints()
2045 return m_formatter
.label();
2048 AssemblerLabel
labelForWatchpoint()
2050 AssemblerLabel result
= m_formatter
.label();
2051 if (static_cast<int>(result
.m_offset
) != m_indexOfLastWatchpoint
)
2053 m_indexOfLastWatchpoint
= result
.m_offset
;
2054 m_indexOfTailOfLastWatchpoint
= result
.m_offset
+ maxJumpReplacementSize();
2058 AssemblerLabel
label()
2060 AssemblerLabel result
= m_formatter
.label();
2061 while (UNLIKELY(static_cast<int>(result
.m_offset
) < m_indexOfTailOfLastWatchpoint
)) {
2062 if (UNLIKELY(static_cast<int>(result
.m_offset
) + 4 <= m_indexOfTailOfLastWatchpoint
))
2066 result
= m_formatter
.label();
2071 AssemblerLabel
align(int alignment
)
2073 while (!m_formatter
.isAligned(alignment
))
2079 static void* getRelocatedAddress(void* code
, AssemblerLabel label
)
2081 ASSERT(label
.isSet());
2082 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + label
.m_offset
);
2085 static int getDifferenceBetweenLabels(AssemblerLabel a
, AssemblerLabel b
)
2087 return b
.m_offset
- a
.m_offset
;
2090 static int jumpSizeDelta(JumpType jumpType
, JumpLinkType jumpLinkType
) { return JUMP_ENUM_SIZE(jumpType
) - JUMP_ENUM_SIZE(jumpLinkType
); }
2092 // Assembler admin methods:
2094 static ALWAYS_INLINE
bool linkRecordSourceComparator(const LinkRecord
& a
, const LinkRecord
& b
)
2096 return a
.from() < b
.from();
2099 static bool canCompact(JumpType jumpType
)
2101 // The following cannot be compacted:
2102 // JumpFixed: represents custom jump sequence
2103 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
2104 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
2105 return (jumpType
== JumpNoCondition
) || (jumpType
== JumpCondition
);
2108 static JumpLinkType
computeJumpType(JumpType jumpType
, const uint8_t* from
, const uint8_t* to
)
2110 if (jumpType
== JumpFixed
)
2113 // for patchable jump we must leave space for the longest code sequence
2114 if (jumpType
== JumpNoConditionFixedSize
)
2116 if (jumpType
== JumpConditionFixedSize
)
2117 return LinkConditionalBX
;
2119 const int paddingSize
= JUMP_ENUM_SIZE(jumpType
);
2121 if (jumpType
== JumpCondition
) {
2122 // 2-byte conditional T1
2123 const uint16_t* jumpT1Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT1
)));
2124 if (canBeJumpT1(jumpT1Location
, to
))
2126 // 4-byte conditional T3
2127 const uint16_t* jumpT3Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT3
)));
2128 if (canBeJumpT3(jumpT3Location
, to
))
2130 // 4-byte conditional T4 with IT
2131 const uint16_t* conditionalJumpT4Location
=
2132 reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkConditionalJumpT4
)));
2133 if (canBeJumpT4(conditionalJumpT4Location
, to
))
2134 return LinkConditionalJumpT4
;
2136 // 2-byte unconditional T2
2137 const uint16_t* jumpT2Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT2
)));
2138 if (canBeJumpT2(jumpT2Location
, to
))
2140 // 4-byte unconditional T4
2141 const uint16_t* jumpT4Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT4
)));
2142 if (canBeJumpT4(jumpT4Location
, to
))
2144 // use long jump sequence
2148 ASSERT(jumpType
== JumpCondition
);
2149 return LinkConditionalBX
;
2152 static JumpLinkType
computeJumpType(LinkRecord
& record
, const uint8_t* from
, const uint8_t* to
)
2154 JumpLinkType linkType
= computeJumpType(record
.type(), from
, to
);
2155 record
.setLinkType(linkType
);
2159 Vector
<LinkRecord
, 0, UnsafeVectorOverflow
>& jumpsToLink()
2161 std::sort(m_jumpsToLink
.begin(), m_jumpsToLink
.end(), linkRecordSourceComparator
);
2162 return m_jumpsToLink
;
2165 static void ALWAYS_INLINE
link(LinkRecord
& record
, uint8_t* from
, uint8_t* to
)
2167 switch (record
.linkType()) {
2169 linkJumpT1(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2172 linkJumpT2(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2175 linkJumpT3(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2178 linkJumpT4(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2180 case LinkConditionalJumpT4
:
2181 linkConditionalJumpT4(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2183 case LinkConditionalBX
:
2184 linkConditionalBX(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2187 linkBX(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2190 RELEASE_ASSERT_NOT_REACHED();
2195 void* unlinkedCode() { return m_formatter
.data(); }
2196 size_t codeSize() const { return m_formatter
.codeSize(); }
2198 static unsigned getCallReturnOffset(AssemblerLabel call
)
2200 ASSERT(call
.isSet());
2201 return call
.m_offset
;
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
2212 void linkJump(AssemblerLabel from
, AssemblerLabel to
, JumpType type
, Condition condition
)
2215 ASSERT(from
.isSet());
2216 m_jumpsToLink
.append(LinkRecord(from
.m_offset
, to
.m_offset
, type
, condition
));
2219 static void linkJump(void* code
, AssemblerLabel from
, void* to
)
2221 ASSERT(from
.isSet());
2223 uint16_t* location
= reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
);
2224 linkJumpAbsolute(location
, to
);
2227 static void linkCall(void* code
, AssemblerLabel from
, void* to
)
2229 ASSERT(!(reinterpret_cast<intptr_t>(code
) & 1));
2230 ASSERT(from
.isSet());
2232 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
) - 1, to
, false);
2235 static void linkPointer(void* code
, AssemblerLabel where
, void* value
)
2237 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
, false);
2240 static void relinkJump(void* from
, void* to
)
2242 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
2243 ASSERT(!(reinterpret_cast<intptr_t>(to
) & 1));
2245 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from
), to
);
2247 cacheFlush(reinterpret_cast<uint16_t*>(from
) - 5, 5 * sizeof(uint16_t));
2250 static void relinkCall(void* from
, void* to
)
2252 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
2254 setPointer(reinterpret_cast<uint16_t*>(from
) - 1, to
, true);
2257 static void* readCallTarget(void* from
)
2259 return readPointer(reinterpret_cast<uint16_t*>(from
) - 1);
2262 static void repatchInt32(void* where
, int32_t value
)
2264 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
2266 setInt32(where
, value
, true);
2269 static void repatchCompact(void* where
, int32_t offset
)
2271 ASSERT(offset
>= -255 && offset
<= 255);
2279 offset
|= (add
<< 9);
2280 offset
|= (1 << 10);
2281 offset
|= (1 << 11);
2283 uint16_t* location
= reinterpret_cast<uint16_t*>(where
);
2284 location
[1] &= ~((1 << 12) - 1);
2285 location
[1] |= offset
;
2286 cacheFlush(location
, sizeof(uint16_t) * 2);
2289 static void repatchPointer(void* where
, void* value
)
2291 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
2293 setPointer(where
, value
, true);
2296 static void* readPointer(void* where
)
2298 return reinterpret_cast<void*>(readInt32(where
));
2301 static void replaceWithJump(void* instructionStart
, void* to
)
2303 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2304 ASSERT(!(bitwise_cast
<uintptr_t>(to
) & 1));
2307 if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart
), to
)) {
2308 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 2;
2309 linkJumpT4(ptr
, to
);
2310 cacheFlush(ptr
- 2, sizeof(uint16_t) * 2);
2312 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 5;
2314 cacheFlush(ptr
- 5, sizeof(uint16_t) * 5);
2317 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 2;
2318 linkJumpT4(ptr
, to
);
2319 cacheFlush(ptr
- 2, sizeof(uint16_t) * 2);
2323 static ptrdiff_t maxJumpReplacementSize()
2332 static void replaceWithLoad(void* instructionStart
)
2334 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2335 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
);
2336 switch (ptr
[0] & 0xFFF0) {
2340 ASSERT(!(ptr
[1] & 0xF000));
2342 ptr
[0] |= OP_LDR_imm_T3
;
2343 ptr
[1] |= (ptr
[1] & 0x0F00) << 4;
2345 cacheFlush(ptr
, sizeof(uint16_t) * 2);
2348 RELEASE_ASSERT_NOT_REACHED();
2352 static void replaceWithAddressComputation(void* instructionStart
)
2354 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2355 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
);
2356 switch (ptr
[0] & 0xFFF0) {
2358 ASSERT(!(ptr
[1] & 0x0F00));
2360 ptr
[0] |= OP_ADD_imm_T3
;
2361 ptr
[1] |= (ptr
[1] & 0xF000) >> 4;
2363 cacheFlush(ptr
, sizeof(uint16_t) * 2);
2368 RELEASE_ASSERT_NOT_REACHED();
2372 unsigned debugOffset() { return m_formatter
.debugOffset(); }
2375 static inline void linuxPageFlush(uintptr_t begin
, uintptr_t end
)
2387 : "r" (begin
), "r" (end
)
2388 : "r0", "r1", "r2");
2392 static void cacheFlush(void* code
, size_t size
)
2395 sys_cache_control(kCacheFunctionPrepareForExecution
, code
, size
);
2397 size_t page
= pageSize();
2398 uintptr_t current
= reinterpret_cast<uintptr_t>(code
);
2399 uintptr_t end
= current
+ size
;
2400 uintptr_t firstPageEnd
= (current
& ~(page
- 1)) + page
;
2402 if (end
<= firstPageEnd
) {
2403 linuxPageFlush(current
, end
);
2407 linuxPageFlush(current
, firstPageEnd
);
2409 for (current
= firstPageEnd
; current
+ page
< end
; current
+= page
)
2410 linuxPageFlush(current
, current
+ page
);
2412 linuxPageFlush(current
, end
);
2414 CacheRangeFlush(code
, size
, CACHE_SYNC_ALL
);
2416 #error "The cacheFlush support is missing on this platform."
2421 // VFP operations commonly take one or more 5-bit operands, typically representing a
2422 // floating point register number. This will commonly be encoded in the instruction
2423 // in two parts, with one single bit field, and one 4-bit field. In the case of
2424 // double precision operands the high bit of the register number will be encoded
    // separately, and for single precision operands the low bit of the register
    // number will be encoded separately.
2427 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2428 // field to be encoded together in the instruction (the low 4-bits of a double
2429 // register number, or the high 4-bits of a single register number), and bit 4
2430 // contains the bit value to be encoded individually.
2432 explicit VFPOperand(uint32_t value
)
2435 ASSERT(!(m_value
& ~0x1f));
2438 VFPOperand(FPDoubleRegisterID reg
)
2443 VFPOperand(RegisterID reg
)
2448 VFPOperand(FPSingleRegisterID reg
)
2449 : m_value(((reg
& 1) << 4) | (reg
>> 1)) // rotate the lowest bit of 'reg' to the top.
2455 return m_value
>> 4;
2460 return m_value
& 0xf;
2466 VFPOperand
vcvtOp(bool toInteger
, bool isUnsigned
, bool isRoundZero
)
2468 // Cannot specify rounding when converting to float.
2469 ASSERT(toInteger
|| !isRoundZero
);
2473 // opc2 indicates both toInteger & isUnsigned.
2474 op
|= isUnsigned
? 0x4 : 0x5;
2475 // 'op' field in instruction is isRoundZero
2479 ASSERT(!isRoundZero
);
2480 // 'op' field in instruction is isUnsigned
2484 return VFPOperand(op
);
2487 static void setInt32(void* code
, uint32_t value
, bool flush
)
2489 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2490 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
2492 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
));
2493 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
>> 16));
2494 location
[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2495 location
[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-3] >> 8) & 0xf, lo16
);
2496 location
[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2497 location
[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-1] >> 8) & 0xf, hi16
);
2500 cacheFlush(location
- 4, 4 * sizeof(uint16_t));
2503 static int32_t readInt32(void* code
)
2505 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2506 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
2508 ARMThumbImmediate lo16
;
2509 ARMThumbImmediate hi16
;
2510 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16
, location
[-4]);
2511 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16
, location
[-3]);
2512 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16
, location
[-2]);
2513 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16
, location
[-1]);
2514 uint32_t result
= hi16
.asUInt16();
2516 result
|= lo16
.asUInt16();
2517 return static_cast<int32_t>(result
);
2520 static void setUInt7ForLoad(void* code
, ARMThumbImmediate imm
)
2522 // Requires us to have planted a LDR_imm_T1
2523 ASSERT(imm
.isValid());
2524 ASSERT(imm
.isUInt7());
2525 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2526 location
[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2527 location
[0] |= (imm
.getUInt7() >> 2) << 6;
2528 cacheFlush(location
, sizeof(uint16_t));
2531 static void setPointer(void* code
, void* value
, bool flush
)
2533 setInt32(code
, reinterpret_cast<uint32_t>(value
), flush
);
2536 static bool isB(void* address
)
2538 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2539 return ((instruction
[0] & 0xf800) == OP_B_T4a
) && ((instruction
[1] & 0xd000) == OP_B_T4b
);
2542 static bool isBX(void* address
)
2544 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2545 return (instruction
[0] & 0xff87) == OP_BX
;
2548 static bool isMOV_imm_T3(void* address
)
2550 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2551 return ((instruction
[0] & 0xFBF0) == OP_MOV_imm_T3
) && ((instruction
[1] & 0x8000) == 0);
2554 static bool isMOVT(void* address
)
2556 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2557 return ((instruction
[0] & 0xFBF0) == OP_MOVT
) && ((instruction
[1] & 0x8000) == 0);
2560 static bool isNOP_T1(void* address
)
2562 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2563 return instruction
[0] == OP_NOP_T1
;
2566 static bool isNOP_T2(void* address
)
2568 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2569 return (instruction
[0] == OP_NOP_T2a
) && (instruction
[1] == OP_NOP_T2b
);
2572 static bool canBeJumpT1(const uint16_t* instruction
, const void* target
)
2574 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2575 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2577 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2578 // It does not appear to be documented in the ARM ARM (big surprise), but
2579 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2580 // less than the actual displacement.
2582 return ((relative
<< 23) >> 23) == relative
;
2585 static bool canBeJumpT2(const uint16_t* instruction
, const void* target
)
2587 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2588 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2590 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2591 // It does not appear to be documented in the ARM ARM (big surprise), but
2592 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2593 // less than the actual displacement.
2595 return ((relative
<< 20) >> 20) == relative
;
2598 static bool canBeJumpT3(const uint16_t* instruction
, const void* target
)
2600 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2601 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2603 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2604 return ((relative
<< 11) >> 11) == relative
;
2607 static bool canBeJumpT4(const uint16_t* instruction
, const void* target
)
2609 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2610 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2612 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2613 return ((relative
<< 7) >> 7) == relative
;
2616 static void linkJumpT1(Condition cond
, uint16_t* instruction
, void* target
)
2618 // FIMXE: this should be up in the MacroAssembler layer. :-(
2619 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2620 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2621 ASSERT(canBeJumpT1(instruction
, target
));
2623 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2624 // It does not appear to be documented in the ARM ARM (big surprise), but
2625 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2626 // less than the actual displacement.
2629 // All branch offsets should be an even distance.
2630 ASSERT(!(relative
& 1));
2631 instruction
[-1] = OP_B_T1
| ((cond
& 0xf) << 8) | ((relative
& 0x1fe) >> 1);
2634 static void linkJumpT2(uint16_t* instruction
, void* target
)
2636 // FIMXE: this should be up in the MacroAssembler layer. :-(
2637 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2638 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2639 ASSERT(canBeJumpT2(instruction
, target
));
2641 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2642 // It does not appear to be documented in the ARM ARM (big surprise), but
2643 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2644 // less than the actual displacement.
2647 // All branch offsets should be an even distance.
2648 ASSERT(!(relative
& 1));
2649 instruction
[-1] = OP_B_T2
| ((relative
& 0xffe) >> 1);
2652 static void linkJumpT3(Condition cond
, uint16_t* instruction
, void* target
)
2654 // FIMXE: this should be up in the MacroAssembler layer. :-(
2655 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2656 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2657 ASSERT(canBeJumpT3(instruction
, target
));
2659 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2661 // All branch offsets should be an even distance.
2662 ASSERT(!(relative
& 1));
2663 instruction
[-2] = OP_B_T3a
| ((relative
& 0x100000) >> 10) | ((cond
& 0xf) << 6) | ((relative
& 0x3f000) >> 12);
2664 instruction
[-1] = OP_B_T3b
| ((relative
& 0x80000) >> 8) | ((relative
& 0x40000) >> 5) | ((relative
& 0xffe) >> 1);
2667 static void linkJumpT4(uint16_t* instruction
, void* target
)
2669 // FIMXE: this should be up in the MacroAssembler layer. :-(
2670 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2671 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2672 ASSERT(canBeJumpT4(instruction
, target
));
2674 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2675 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2677 relative
^= 0xC00000;
2679 // All branch offsets should be an even distance.
2680 ASSERT(!(relative
& 1));
2681 instruction
[-2] = OP_B_T4a
| ((relative
& 0x1000000) >> 14) | ((relative
& 0x3ff000) >> 12);
2682 instruction
[-1] = OP_B_T4b
| ((relative
& 0x800000) >> 10) | ((relative
& 0x400000) >> 11) | ((relative
& 0xffe) >> 1);
2685 static void linkConditionalJumpT4(Condition cond
, uint16_t* instruction
, void* target
)
2687 // FIMXE: this should be up in the MacroAssembler layer. :-(
2688 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2689 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2691 instruction
[-3] = ifThenElse(cond
) | OP_IT
;
2692 linkJumpT4(instruction
, target
);
2695 static void linkBX(uint16_t* instruction
, void* target
)
2697 // FIMXE: this should be up in the MacroAssembler layer. :-(
2698 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2699 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2701 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2702 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2703 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2704 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2705 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2706 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2707 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2708 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2711 static void linkConditionalBX(Condition cond
, uint16_t* instruction
, void* target
)
2713 // FIMXE: this should be up in the MacroAssembler layer. :-(
2714 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2715 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2717 linkBX(instruction
, target
);
2718 instruction
[-6] = ifThenElse(cond
, true, true) | OP_IT
;
2721 static void linkJumpAbsolute(uint16_t* instruction
, void* target
)
2723 // FIMXE: this should be up in the MacroAssembler layer. :-(
2724 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2725 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2727 ASSERT((isMOV_imm_T3(instruction
- 5) && isMOVT(instruction
- 3) && isBX(instruction
- 1))
2728 || (isNOP_T1(instruction
- 5) && isNOP_T2(instruction
- 4) && isB(instruction
- 2)));
2730 if (canBeJumpT4(instruction
, target
)) {
2731 // There may be a better way to fix this, but right now put the NOPs first, since in the
2732 // case of an conditional branch this will be coming after an ITTT predicating *three*
2733 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2734 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2735 // actually be the second half of a 2-word op.
2736 instruction
[-5] = OP_NOP_T1
;
2737 instruction
[-4] = OP_NOP_T2a
;
2738 instruction
[-3] = OP_NOP_T2b
;
2739 linkJumpT4(instruction
, target
);
2741 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2742 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2743 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2744 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2745 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2746 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2747 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2748 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2752 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op
, ARMThumbImmediate imm
)
2754 return op
| (imm
.m_value
.i
<< 10) | imm
.m_value
.imm4
;
2757 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate
& result
, uint16_t value
)
2759 result
.m_value
.i
= (value
>> 10) & 1;
2760 result
.m_value
.imm4
= value
& 15;
2763 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd
, ARMThumbImmediate imm
)
2765 return (imm
.m_value
.imm3
<< 12) | (rd
<< 8) | imm
.m_value
.imm8
;
2768 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate
& result
, uint16_t value
)
2770 result
.m_value
.imm3
= (value
>> 12) & 7;
2771 result
.m_value
.imm8
= value
& 255;
2774 class ARMInstructionFormatter
{
2776 ALWAYS_INLINE
void oneWordOp5Reg3Imm8(OpcodeID op
, RegisterID rd
, uint8_t imm
)
2778 m_buffer
.putShort(op
| (rd
<< 8) | imm
);
2781 ALWAYS_INLINE
void oneWordOp5Imm5Reg3Reg3(OpcodeID op
, uint8_t imm
, RegisterID reg1
, RegisterID reg2
)
2783 m_buffer
.putShort(op
| (imm
<< 6) | (reg1
<< 3) | reg2
);
2786 ALWAYS_INLINE
void oneWordOp7Reg3Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
, RegisterID reg3
)
2788 m_buffer
.putShort(op
| (reg1
<< 6) | (reg2
<< 3) | reg3
);
2791 ALWAYS_INLINE
void oneWordOp7Imm9(OpcodeID op
, uint16_t imm
)
2793 m_buffer
.putShort(op
| imm
);
2796 ALWAYS_INLINE
void oneWordOp8Imm8(OpcodeID op
, uint8_t imm
)
2798 m_buffer
.putShort(op
| imm
);
2801 ALWAYS_INLINE
void oneWordOp8RegReg143(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2803 m_buffer
.putShort(op
| ((reg2
& 8) << 4) | (reg1
<< 3) | (reg2
& 7));
2806 ALWAYS_INLINE
void oneWordOp9Imm7(OpcodeID op
, uint8_t imm
)
2808 m_buffer
.putShort(op
| imm
);
2811 ALWAYS_INLINE
void oneWordOp10Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2813 m_buffer
.putShort(op
| (reg1
<< 3) | reg2
);
2816 ALWAYS_INLINE
void twoWordOp12Reg4FourFours(OpcodeID1 op
, RegisterID reg
, FourFours ff
)
2818 m_buffer
.putShort(op
| reg
);
2819 m_buffer
.putShort(ff
.m_u
.value
);
2822 ALWAYS_INLINE
void twoWordOp16FourFours(OpcodeID1 op
, FourFours ff
)
2824 m_buffer
.putShort(op
);
2825 m_buffer
.putShort(ff
.m_u
.value
);
2828 ALWAYS_INLINE
void twoWordOp16Op16(OpcodeID1 op1
, OpcodeID2 op2
)
2830 m_buffer
.putShort(op1
);
2831 m_buffer
.putShort(op2
);
2834 ALWAYS_INLINE
void twoWordOp16Imm16(OpcodeID1 op1
, uint16_t imm
)
2836 m_buffer
.putShort(op1
);
2837 m_buffer
.putShort(imm
);
2840 ALWAYS_INLINE
void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op
, int imm4
, RegisterID rd
, ARMThumbImmediate imm
)
2842 ARMThumbImmediate newImm
= imm
;
2843 newImm
.m_value
.imm4
= imm4
;
2845 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op
, newImm
));
2846 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, newImm
));
2849 ALWAYS_INLINE
void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm
)
2851 m_buffer
.putShort(op
| reg1
);
2852 m_buffer
.putShort((reg2
<< 12) | imm
);
2855 ALWAYS_INLINE
void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm1
, uint16_t imm2
, uint16_t imm3
)
2857 m_buffer
.putShort(op
| reg1
);
2858 m_buffer
.putShort((imm1
<< 12) | (reg2
<< 8) | (imm2
<< 6) | imm3
);
2861 // Formats up instructions of the pattern:
2862 // 111111111B11aaaa:bbbb222SA2C2cccc
2863 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2864 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2865 ALWAYS_INLINE
void vfpOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, VFPOperand a
, VFPOperand b
, VFPOperand c
)
2867 ASSERT(!(op1
& 0x004f));
2868 ASSERT(!(op2
& 0xf1af));
2869 m_buffer
.putShort(op1
| b
.bits1() << 6 | a
.bits4());
2870 m_buffer
.putShort(op2
| b
.bits4() << 12 | size
<< 8 | a
.bits1() << 7 | c
.bits1() << 5 | c
.bits4());
2873 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2874 // (i.e. +/-(0..255) 32-bit words)
2875 ALWAYS_INLINE
void vfpMemOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, RegisterID rn
, VFPOperand rd
, int32_t imm
)
2883 uint32_t offset
= imm
;
2884 ASSERT(!(offset
& ~0x3fc));
2887 m_buffer
.putShort(op1
| (up
<< 7) | rd
.bits1() << 6 | rn
);
2888 m_buffer
.putShort(op2
| rd
.bits4() << 12 | size
<< 8 | offset
);
2891 // Administrative methods:
2893 size_t codeSize() const { return m_buffer
.codeSize(); }
2894 AssemblerLabel
label() const { return m_buffer
.label(); }
2895 bool isAligned(int alignment
) const { return m_buffer
.isAligned(alignment
); }
2896 void* data() const { return m_buffer
.data(); }
2898 unsigned debugOffset() { return m_buffer
.debugOffset(); }
2900 AssemblerBuffer m_buffer
;
2903 Vector
<LinkRecord
, 0, UnsafeVectorOverflow
> m_jumpsToLink
;
2904 int m_indexOfLastWatchpoint
;
2905 int m_indexOfTailOfLastWatchpoint
;
2910 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2912 #endif // ARMAssembler_h