2 * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32 #include "AssemblerBuffer.h"
34 #include <wtf/Assertions.h>
35 #include <wtf/Vector.h>
40 namespace ARMRegisters
{
49 r7
, fp
= r7
, // frame pointer
51 r9
, sb
= r9
, // static base
52 r10
, sl
= r10
, // stack limit
128 } FPDoubleRegisterID
;
165 inline FPSingleRegisterID
asSingle(FPDoubleRegisterID reg
)
168 return (FPSingleRegisterID
)(reg
<< 1);
171 inline FPDoubleRegisterID
asDouble(FPSingleRegisterID reg
)
174 return (FPDoubleRegisterID
)(reg
>> 1);
178 #define FOR_EACH_CPU_REGISTER(V) \
179 FOR_EACH_CPU_GPREGISTER(V) \
180 FOR_EACH_CPU_SPECIAL_REGISTER(V) \
181 FOR_EACH_CPU_FPREGISTER(V)
183 #define FOR_EACH_CPU_GPREGISTER(V) \
201 #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
205 #define FOR_EACH_CPU_FPREGISTER(V) \
222 FOR_EACH_CPU_FPREGISTER_EXTENSION(V)
224 #if CPU(APPLE_ARMV7S)
225 #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) \
243 #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) // Nothing to add.
244 #endif // CPU(APPLE_ARMV7S)
246 #endif // USE(MASM_PROBE)
249 class ARMv7Assembler
;
250 class ARMThumbImmediate
{
251 friend class ARMv7Assembler
;
253 typedef uint8_t ThumbImmediateType
;
254 static const ThumbImmediateType TypeInvalid
= 0;
255 static const ThumbImmediateType TypeEncoded
= 1;
256 static const ThumbImmediateType TypeUInt16
= 2;
266 // If this is an encoded immediate, then it may describe a shift, or a pattern.
268 unsigned shiftValue7
: 7;
269 unsigned shiftAmount
: 5;
272 unsigned immediate
: 8;
273 unsigned pattern
: 4;
275 } ThumbImmediateValue
;
277 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
288 ALWAYS_INLINE
static void countLeadingZerosPartial(uint32_t& value
, int32_t& zeros
, const int N
)
290 if (value
& ~((1 << N
) - 1)) /* check for any of the top N bits (of 2N bits) are set */
291 value
>>= N
; /* if any were set, lose the bottom N */
292 else /* if none of the top N bits are set, */
293 zeros
+= N
; /* then we have identified N leading zeros */
296 static int32_t countLeadingZeros(uint32_t value
)
302 countLeadingZerosPartial(value
, zeros
, 16);
303 countLeadingZerosPartial(value
, zeros
, 8);
304 countLeadingZerosPartial(value
, zeros
, 4);
305 countLeadingZerosPartial(value
, zeros
, 2);
306 countLeadingZerosPartial(value
, zeros
, 1);
311 : m_type(TypeInvalid
)
316 ARMThumbImmediate(ThumbImmediateType type
, ThumbImmediateValue value
)
322 ARMThumbImmediate(ThumbImmediateType type
, uint16_t value
)
325 // Make sure this constructor is only reached with type TypeUInt16;
326 // this extra parameter makes the code a little clearer by making it
327 // explicit at call sites which type is being constructed
328 ASSERT_UNUSED(type
, type
== TypeUInt16
);
330 m_value
.asInt
= value
;
334 static ARMThumbImmediate
makeEncodedImm(uint32_t value
)
336 ThumbImmediateValue encoding
;
339 // okay, these are easy.
341 encoding
.immediate
= value
;
342 encoding
.pattern
= 0;
343 return ARMThumbImmediate(TypeEncoded
, encoding
);
346 int32_t leadingZeros
= countLeadingZeros(value
);
347 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
348 ASSERT(leadingZeros
< 24);
350 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
351 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
352 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
353 int32_t rightShiftAmount
= 24 - leadingZeros
;
354 if (value
== ((value
>> rightShiftAmount
) << rightShiftAmount
)) {
355 // Shift the value down to the low byte position. The assign to
356 // shiftValue7 drops the implicit top bit.
357 encoding
.shiftValue7
= value
>> rightShiftAmount
;
358 // The endoded shift amount is the magnitude of a right rotate.
359 encoding
.shiftAmount
= 8 + leadingZeros
;
360 return ARMThumbImmediate(TypeEncoded
, encoding
);
366 if ((bytes
.byte0
== bytes
.byte1
) && (bytes
.byte0
== bytes
.byte2
) && (bytes
.byte0
== bytes
.byte3
)) {
367 encoding
.immediate
= bytes
.byte0
;
368 encoding
.pattern
= 3;
369 return ARMThumbImmediate(TypeEncoded
, encoding
);
372 if ((bytes
.byte0
== bytes
.byte2
) && !(bytes
.byte1
| bytes
.byte3
)) {
373 encoding
.immediate
= bytes
.byte0
;
374 encoding
.pattern
= 1;
375 return ARMThumbImmediate(TypeEncoded
, encoding
);
378 if ((bytes
.byte1
== bytes
.byte3
) && !(bytes
.byte0
| bytes
.byte2
)) {
379 encoding
.immediate
= bytes
.byte1
;
380 encoding
.pattern
= 2;
381 return ARMThumbImmediate(TypeEncoded
, encoding
);
384 return ARMThumbImmediate();
387 static ARMThumbImmediate
makeUInt12(int32_t value
)
389 return (!(value
& 0xfffff000))
390 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
391 : ARMThumbImmediate();
394 static ARMThumbImmediate
makeUInt12OrEncodedImm(int32_t value
)
396 // If this is not a 12-bit unsigned it, try making an encoded immediate.
397 return (!(value
& 0xfffff000))
398 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
399 : makeEncodedImm(value
);
402 // The 'make' methods, above, return a !isValid() value if the argument
403 // cannot be represented as the requested type. This methods is called
404 // 'get' since the argument can always be represented.
405 static ARMThumbImmediate
makeUInt16(uint16_t value
)
407 return ARMThumbImmediate(TypeUInt16
, value
);
412 return m_type
!= TypeInvalid
;
415 uint16_t asUInt16() const { return m_value
.asInt
; }
417 // These methods rely on the format of encoded byte values.
418 bool isUInt3() { return !(m_value
.asInt
& 0xfff8); }
419 bool isUInt4() { return !(m_value
.asInt
& 0xfff0); }
420 bool isUInt5() { return !(m_value
.asInt
& 0xffe0); }
421 bool isUInt6() { return !(m_value
.asInt
& 0xffc0); }
422 bool isUInt7() { return !(m_value
.asInt
& 0xff80); }
423 bool isUInt8() { return !(m_value
.asInt
& 0xff00); }
424 bool isUInt9() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfe00); }
425 bool isUInt10() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfc00); }
426 bool isUInt12() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xf000); }
427 bool isUInt16() { return m_type
== TypeUInt16
; }
428 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value
.asInt
; }
429 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value
.asInt
; }
430 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value
.asInt
; }
431 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value
.asInt
; }
432 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value
.asInt
; }
433 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value
.asInt
; }
434 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value
.asInt
; }
435 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value
.asInt
; }
436 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value
.asInt
; }
437 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value
.asInt
; }
439 bool isEncodedImm() { return m_type
== TypeEncoded
; }
442 ThumbImmediateType m_type
;
443 ThumbImmediateValue m_value
;
452 SRType_RRX
= SRType_ROR
455 class ShiftTypeAndAmount
{
456 friend class ARMv7Assembler
;
461 m_u
.type
= (ARMShiftType
)0;
465 ShiftTypeAndAmount(ARMShiftType type
, unsigned amount
)
468 m_u
.amount
= amount
& 31;
471 unsigned lo4() { return m_u
.lo4
; }
472 unsigned hi4() { return m_u
.hi4
; }
487 class ARMv7Assembler
{
489 typedef ARMRegisters::RegisterID RegisterID
;
490 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID
;
491 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID
;
492 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID
;
493 typedef FPDoubleRegisterID FPRegisterID
;
495 static RegisterID
firstRegister() { return ARMRegisters::r0
; }
496 static RegisterID
lastRegister() { return ARMRegisters::r13
; }
498 static FPRegisterID
firstFPRegister() { return ARMRegisters::d0
; }
499 static FPRegisterID
lastFPRegister() { return ARMRegisters::d31
; }
501 // (HS, LO, HI, LS) -> (AE, B, A, BE)
502 // (VS, VC) -> (O, NO)
504 ConditionEQ
, // Zero / Equal.
505 ConditionNE
, // Non-zero / Not equal.
506 ConditionHS
, ConditionCS
= ConditionHS
, // Unsigned higher or same.
507 ConditionLO
, ConditionCC
= ConditionLO
, // Unsigned lower.
508 ConditionMI
, // Negative.
509 ConditionPL
, // Positive or zero.
510 ConditionVS
, // Overflowed.
511 ConditionVC
, // Not overflowed.
512 ConditionHI
, // Unsigned higher.
513 ConditionLS
, // Unsigned lower or same.
514 ConditionGE
, // Signed greater than or equal.
515 ConditionLT
, // Signed less than.
516 ConditionGT
, // Signed greater than.
517 ConditionLE
, // Signed less than or equal.
518 ConditionAL
, // Unconditional / Always execute.
522 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
523 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
524 enum JumpType
{ JumpFixed
= JUMP_ENUM_WITH_SIZE(0, 0),
525 JumpNoCondition
= JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
526 JumpCondition
= JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
527 JumpNoConditionFixedSize
= JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
528 JumpConditionFixedSize
= JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
531 LinkInvalid
= JUMP_ENUM_WITH_SIZE(0, 0),
532 LinkJumpT1
= JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
533 LinkJumpT2
= JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
534 LinkJumpT3
= JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
535 LinkJumpT4
= JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
536 LinkConditionalJumpT4
= JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
537 LinkBX
= JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
538 LinkConditionalBX
= JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
543 LinkRecord(intptr_t from
, intptr_t to
, JumpType type
, Condition condition
)
545 data
.realTypes
.m_from
= from
;
546 data
.realTypes
.m_to
= to
;
547 data
.realTypes
.m_type
= type
;
548 data
.realTypes
.m_linkType
= LinkInvalid
;
549 data
.realTypes
.m_condition
= condition
;
551 void operator=(const LinkRecord
& other
)
553 data
.copyTypes
.content
[0] = other
.data
.copyTypes
.content
[0];
554 data
.copyTypes
.content
[1] = other
.data
.copyTypes
.content
[1];
555 data
.copyTypes
.content
[2] = other
.data
.copyTypes
.content
[2];
557 intptr_t from() const { return data
.realTypes
.m_from
; }
558 void setFrom(intptr_t from
) { data
.realTypes
.m_from
= from
; }
559 intptr_t to() const { return data
.realTypes
.m_to
; }
560 JumpType
type() const { return data
.realTypes
.m_type
; }
561 JumpLinkType
linkType() const { return data
.realTypes
.m_linkType
; }
562 void setLinkType(JumpLinkType linkType
) { ASSERT(data
.realTypes
.m_linkType
== LinkInvalid
); data
.realTypes
.m_linkType
= linkType
; }
563 Condition
condition() const { return data
.realTypes
.m_condition
; }
567 intptr_t m_from
: 31;
570 JumpLinkType m_linkType
: 8;
571 Condition m_condition
: 16;
576 COMPILE_ASSERT(sizeof(RealTypes
) == sizeof(CopyTypes
), LinkRecordCopyStructSizeEqualsRealStruct
);
581 : m_indexOfLastWatchpoint(INT_MIN
)
582 , m_indexOfTailOfLastWatchpoint(INT_MIN
)
586 AssemblerBuffer
& buffer() { return m_formatter
.m_buffer
; }
591 static bool BadReg(RegisterID reg
)
593 return (reg
== ARMRegisters::sp
) || (reg
== ARMRegisters::pc
);
596 uint32_t singleRegisterMask(FPSingleRegisterID rdNum
, int highBitsShift
, int lowBitShift
)
598 uint32_t rdMask
= (rdNum
>> 1) << highBitsShift
;
600 rdMask
|= 1 << lowBitShift
;
604 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum
, int highBitShift
, int lowBitsShift
)
606 uint32_t rdMask
= (rdNum
& 0xf) << lowBitsShift
;
608 rdMask
|= 1 << highBitShift
;
613 OP_ADD_reg_T1
= 0x1800,
614 OP_SUB_reg_T1
= 0x1A00,
615 OP_ADD_imm_T1
= 0x1C00,
616 OP_SUB_imm_T1
= 0x1E00,
617 OP_MOV_imm_T1
= 0x2000,
618 OP_CMP_imm_T1
= 0x2800,
619 OP_ADD_imm_T2
= 0x3000,
620 OP_SUB_imm_T2
= 0x3800,
621 OP_AND_reg_T1
= 0x4000,
622 OP_EOR_reg_T1
= 0x4040,
623 OP_TST_reg_T1
= 0x4200,
624 OP_RSB_imm_T1
= 0x4240,
625 OP_CMP_reg_T1
= 0x4280,
626 OP_ORR_reg_T1
= 0x4300,
627 OP_MVN_reg_T1
= 0x43C0,
628 OP_ADD_reg_T2
= 0x4400,
629 OP_MOV_reg_T1
= 0x4600,
632 OP_STR_reg_T1
= 0x5000,
633 OP_STRH_reg_T1
= 0x5200,
634 OP_STRB_reg_T1
= 0x5400,
635 OP_LDRSB_reg_T1
= 0x5600,
636 OP_LDR_reg_T1
= 0x5800,
637 OP_LDRH_reg_T1
= 0x5A00,
638 OP_LDRB_reg_T1
= 0x5C00,
639 OP_LDRSH_reg_T1
= 0x5E00,
640 OP_STR_imm_T1
= 0x6000,
641 OP_LDR_imm_T1
= 0x6800,
642 OP_STRB_imm_T1
= 0x7000,
643 OP_LDRB_imm_T1
= 0x7800,
644 OP_STRH_imm_T1
= 0x8000,
645 OP_LDRH_imm_T1
= 0x8800,
646 OP_STR_imm_T2
= 0x9000,
647 OP_LDR_imm_T2
= 0x9800,
648 OP_ADD_SP_imm_T1
= 0xA800,
649 OP_ADD_SP_imm_T2
= 0xB000,
650 OP_SUB_SP_imm_T1
= 0xB080,
663 OP_AND_reg_T2
= 0xEA00,
664 OP_TST_reg_T2
= 0xEA10,
665 OP_ORR_reg_T2
= 0xEA40,
666 OP_ORR_S_reg_T2
= 0xEA50,
667 OP_ASR_imm_T1
= 0xEA4F,
668 OP_LSL_imm_T1
= 0xEA4F,
669 OP_LSR_imm_T1
= 0xEA4F,
670 OP_ROR_imm_T1
= 0xEA4F,
671 OP_MVN_reg_T2
= 0xEA6F,
672 OP_EOR_reg_T2
= 0xEA80,
673 OP_ADD_reg_T3
= 0xEB00,
674 OP_ADD_S_reg_T3
= 0xEB10,
675 OP_SUB_reg_T2
= 0xEBA0,
676 OP_SUB_S_reg_T2
= 0xEBB0,
677 OP_CMP_reg_T2
= 0xEBB0,
678 OP_VMOV_CtoD
= 0xEC00,
679 OP_VMOV_DtoC
= 0xEC10,
684 OP_VMOV_CtoS
= 0xEE00,
685 OP_VMOV_StoC
= 0xEE10,
692 OP_VCVT_FPIVFP
= 0xEEB0,
694 OP_VMOV_IMM_T2
= 0xEEB0,
697 OP_VSQRT_T1
= 0xEEB0,
698 OP_VCVTSD_T1
= 0xEEB0,
699 OP_VCVTDS_T1
= 0xEEB0,
702 OP_AND_imm_T1
= 0xF000,
704 OP_ORR_imm_T1
= 0xF040,
705 OP_MOV_imm_T2
= 0xF040,
707 OP_EOR_imm_T1
= 0xF080,
708 OP_ADD_imm_T3
= 0xF100,
709 OP_ADD_S_imm_T3
= 0xF110,
712 OP_SUB_imm_T3
= 0xF1A0,
713 OP_SUB_S_imm_T3
= 0xF1B0,
714 OP_CMP_imm_T2
= 0xF1B0,
715 OP_RSB_imm_T2
= 0xF1C0,
716 OP_RSB_S_imm_T2
= 0xF1D0,
717 OP_ADD_imm_T4
= 0xF200,
718 OP_MOV_imm_T3
= 0xF240,
719 OP_SUB_imm_T4
= 0xF2A0,
723 OP_DMB_SY_T2a
= 0xF3BF,
724 OP_STRB_imm_T3
= 0xF800,
725 OP_STRB_reg_T2
= 0xF800,
726 OP_LDRB_imm_T3
= 0xF810,
727 OP_LDRB_reg_T2
= 0xF810,
728 OP_STRH_imm_T3
= 0xF820,
729 OP_STRH_reg_T2
= 0xF820,
730 OP_LDRH_reg_T2
= 0xF830,
731 OP_LDRH_imm_T3
= 0xF830,
732 OP_STR_imm_T4
= 0xF840,
733 OP_STR_reg_T2
= 0xF840,
734 OP_LDR_imm_T4
= 0xF850,
735 OP_LDR_reg_T2
= 0xF850,
736 OP_STRB_imm_T2
= 0xF880,
737 OP_LDRB_imm_T2
= 0xF890,
738 OP_STRH_imm_T2
= 0xF8A0,
739 OP_LDRH_imm_T2
= 0xF8B0,
740 OP_STR_imm_T3
= 0xF8C0,
741 OP_LDR_imm_T3
= 0xF8D0,
742 OP_LDRSB_reg_T2
= 0xF910,
743 OP_LDRSH_reg_T2
= 0xF930,
744 OP_LSL_reg_T2
= 0xFA00,
745 OP_LSR_reg_T2
= 0xFA20,
746 OP_ASR_reg_T2
= 0xFA40,
747 OP_ROR_reg_T2
= 0xFA60,
749 OP_SMULL_T1
= 0xFB80,
750 #if CPU(APPLE_ARMV7S)
757 OP_VADD_T2b
= 0x0A00,
761 OP_VMOV_IMM_T2b
= 0x0A00,
762 OP_VMOV_T2b
= 0x0A40,
763 OP_VMUL_T2b
= 0x0A00,
766 OP_VMOV_StoCb
= 0x0A10,
767 OP_VMOV_CtoSb
= 0x0A10,
768 OP_VMOV_DtoCb
= 0x0A10,
769 OP_VMOV_CtoDb
= 0x0A10,
771 OP_VABS_T2b
= 0x0A40,
773 OP_VCVT_FPIVFPb
= 0x0A40,
774 OP_VNEG_T2b
= 0x0A40,
775 OP_VSUB_T2b
= 0x0A40,
776 OP_VSQRT_T1b
= 0x0A40,
777 OP_VCVTSD_T1b
= 0x0A40,
778 OP_VCVTDS_T1b
= 0x0A40,
780 OP_DMB_SY_T2b
= 0x8F5F,
786 FourFours(unsigned f3
, unsigned f2
, unsigned f1
, unsigned f0
)
805 class ARMInstructionFormatter
;
808 static bool ifThenElseConditionBit(Condition condition
, bool isIf
)
810 return isIf
? (condition
& 1) : !(condition
& 1);
812 static uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
, bool inst4if
)
814 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
815 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
816 | (ifThenElseConditionBit(condition
, inst4if
) << 1)
818 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
819 return (condition
<< 4) | mask
;
821 static uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
)
823 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
824 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
826 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
827 return (condition
<< 4) | mask
;
829 static uint8_t ifThenElse(Condition condition
, bool inst2if
)
831 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
833 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
834 return (condition
<< 4) | mask
;
837 static uint8_t ifThenElse(Condition condition
)
840 return (condition
<< 4) | mask
;
845 void adc(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
847 // Rd can only be SP if Rn is also SP.
848 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
849 ASSERT(rd
!= ARMRegisters::pc
);
850 ASSERT(rn
!= ARMRegisters::pc
);
851 ASSERT(imm
.isEncodedImm());
853 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm
, rn
, rd
, imm
);
856 void add(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
858 // Rd can only be SP if Rn is also SP.
859 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
860 ASSERT(rd
!= ARMRegisters::pc
);
861 ASSERT(rn
!= ARMRegisters::pc
);
862 ASSERT(imm
.isValid());
864 if (rn
== ARMRegisters::sp
&& imm
.isUInt16()) {
865 ASSERT(!(imm
.getUInt16() & 3));
866 if (!(rd
& 8) && imm
.isUInt10()) {
867 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1
, rd
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
869 } else if ((rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
870 m_formatter
.oneWordOp9Imm7(OP_ADD_SP_imm_T2
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
873 } else if (!((rd
| rn
) & 8)) {
875 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
877 } else if ((rd
== rn
) && imm
.isUInt8()) {
878 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
883 if (imm
.isEncodedImm())
884 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3
, rn
, rd
, imm
);
886 ASSERT(imm
.isUInt12());
887 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4
, rn
, rd
, imm
);
891 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
893 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
894 ASSERT(rd
!= ARMRegisters::pc
);
895 ASSERT(rn
!= ARMRegisters::pc
);
897 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
900 // NOTE: In an IT block, add doesn't modify the flags register.
901 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
)
903 if (rd
== ARMRegisters::sp
) {
909 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rm
, rd
);
911 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rn
, rd
);
912 else if (!((rd
| rn
| rm
) & 8))
913 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
915 add(rd
, rn
, rm
, ShiftTypeAndAmount());
918 // Not allowed in an IT (if then) block.
919 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
921 // Rd can only be SP if Rn is also SP.
922 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
923 ASSERT(rd
!= ARMRegisters::pc
);
924 ASSERT(rn
!= ARMRegisters::pc
);
925 ASSERT(imm
.isEncodedImm());
927 if (!((rd
| rn
) & 8)) {
929 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
931 } else if ((rd
== rn
) && imm
.isUInt8()) {
932 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
937 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3
, rn
, rd
, imm
);
940 // Not allowed in an IT (if then) block?
941 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
943 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
944 ASSERT(rd
!= ARMRegisters::pc
);
945 ASSERT(rn
!= ARMRegisters::pc
);
947 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
950 // Not allowed in an IT (if then) block.
951 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
953 if (!((rd
| rn
| rm
) & 8))
954 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
956 add_S(rd
, rn
, rm
, ShiftTypeAndAmount());
959 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
963 ASSERT(imm
.isEncodedImm());
964 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1
, rn
, rd
, imm
);
967 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
972 m_formatter
.twoWordOp12Reg4FourFours(OP_AND_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
975 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
)
977 if ((rd
== rn
) && !((rd
| rm
) & 8))
978 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rm
, rd
);
979 else if ((rd
== rm
) && !((rd
| rn
) & 8))
980 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rn
, rd
);
982 ARM_and(rd
, rn
, rm
, ShiftTypeAndAmount());
985 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
989 ShiftTypeAndAmount
shift(SRType_ASR
, shiftAmount
);
990 m_formatter
.twoWordOp16FourFours(OP_ASR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
993 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
998 m_formatter
.twoWordOp12Reg4FourFours(OP_ASR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1001 // Only allowed in IT (if then) block if last instruction.
1002 ALWAYS_INLINE AssemblerLabel
b()
1004 m_formatter
.twoWordOp16Op16(OP_B_T4a
, OP_B_T4b
);
1005 return m_formatter
.label();
1008 // Only allowed in IT (if then) block if last instruction.
1009 ALWAYS_INLINE AssemblerLabel
blx(RegisterID rm
)
1011 ASSERT(rm
!= ARMRegisters::pc
);
1012 m_formatter
.oneWordOp8RegReg143(OP_BLX
, rm
, (RegisterID
)8);
1013 return m_formatter
.label();
1016 // Only allowed in IT (if then) block if last instruction.
1017 ALWAYS_INLINE AssemblerLabel
bx(RegisterID rm
)
1019 m_formatter
.oneWordOp8RegReg143(OP_BX
, rm
, (RegisterID
)0);
1020 return m_formatter
.label();
1023 void bkpt(uint8_t imm
= 0)
1025 m_formatter
.oneWordOp8Imm8(OP_BKPT
, imm
);
1028 ALWAYS_INLINE
void clz(RegisterID rd
, RegisterID rm
)
1030 ASSERT(!BadReg(rd
));
1031 ASSERT(!BadReg(rm
));
1032 m_formatter
.twoWordOp12Reg4FourFours(OP_CLZ
, rm
, FourFours(0xf, rd
, 8, rm
));
1035 ALWAYS_INLINE
void cmn(RegisterID rn
, ARMThumbImmediate imm
)
1037 ASSERT(rn
!= ARMRegisters::pc
);
1038 ASSERT(imm
.isEncodedImm());
1040 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm
, rn
, (RegisterID
)0xf, imm
);
1043 ALWAYS_INLINE
void cmp(RegisterID rn
, ARMThumbImmediate imm
)
1045 ASSERT(rn
!= ARMRegisters::pc
);
1046 ASSERT(imm
.isEncodedImm());
1048 if (!(rn
& 8) && imm
.isUInt8())
1049 m_formatter
.oneWordOp5Reg3Imm8(OP_CMP_imm_T1
, rn
, imm
.getUInt8());
1051 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2
, rn
, (RegisterID
)0xf, imm
);
1054 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1056 ASSERT(rn
!= ARMRegisters::pc
);
1057 ASSERT(!BadReg(rm
));
1058 m_formatter
.twoWordOp12Reg4FourFours(OP_CMP_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
1061 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
)
1064 cmp(rn
, rm
, ShiftTypeAndAmount());
1066 m_formatter
.oneWordOp10Reg3Reg3(OP_CMP_reg_T1
, rm
, rn
);
1069 // xor is not spelled with an 'e'. :-(
1070 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1072 ASSERT(!BadReg(rd
));
1073 ASSERT(!BadReg(rn
));
1074 ASSERT(imm
.isEncodedImm());
1075 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1
, rn
, rd
, imm
);
1078 // xor is not spelled with an 'e'. :-(
1079 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1081 ASSERT(!BadReg(rd
));
1082 ASSERT(!BadReg(rn
));
1083 ASSERT(!BadReg(rm
));
1084 m_formatter
.twoWordOp12Reg4FourFours(OP_EOR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1087 // xor is not spelled with an 'e'. :-(
1088 void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1090 if ((rd
== rn
) && !((rd
| rm
) & 8))
1091 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rm
, rd
);
1092 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1093 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rn
, rd
);
1095 eor(rd
, rn
, rm
, ShiftTypeAndAmount());
1098 ALWAYS_INLINE
void it(Condition cond
)
1100 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
));
1103 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
)
1105 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
));
1108 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
)
1110 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
));
1113 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
, bool inst4if
)
1115 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
, inst4if
));
1118 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1119 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1121 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1122 ASSERT(imm
.isUInt12());
1124 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1125 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1126 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1127 m_formatter
.oneWordOp5Reg3Imm8(OP_LDR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1129 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, imm
.getUInt12());
1132 ALWAYS_INLINE
void ldrWide8BitImmediate(RegisterID rt
, RegisterID rn
, uint8_t immediate
)
1134 ASSERT(rn
!= ARMRegisters::pc
);
1135 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, immediate
);
1138 ALWAYS_INLINE
void ldrCompact(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1140 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1141 ASSERT(imm
.isUInt7());
1142 ASSERT(!((rt
| rn
) & 8));
1143 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1146 // If index is set, this is a regular offset or a pre-indexed load;
// if index is not set then this is a post-index load.
1149 // If wback is set rn is updated - this is a pre or post index load,
1150 // if wback is not set this is a regular offset memory access.
1152 // (-255 <= offset <= 255)
1154 // _tmp = _reg + offset
1155 // MEM[index ? _tmp : _reg] = REG[rt]
1156 // if (wback) REG[rn] = _tmp
1157 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1159 ASSERT(rt
!= ARMRegisters::pc
);
1160 ASSERT(rn
!= ARMRegisters::pc
);
1161 ASSERT(index
|| wback
);
1162 ASSERT(!wback
| (rt
!= rn
));
1169 ASSERT((offset
& ~0xff) == 0);
1171 offset
|= (wback
<< 8);
1172 offset
|= (add
<< 9);
1173 offset
|= (index
<< 10);
1174 offset
|= (1 << 11);
1176 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4
, rn
, rt
, offset
);
1179 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1180 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1182 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1183 ASSERT(!BadReg(rm
));
1186 if (!shift
&& !((rt
| rn
| rm
) & 8))
1187 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1
, rm
, rn
, rt
);
1189 m_formatter
.twoWordOp12Reg4FourFours(OP_LDR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1192 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1193 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1195 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1196 ASSERT(imm
.isUInt12());
1197 ASSERT(!(imm
.getUInt12() & 1));
1199 if (!((rt
| rn
) & 8) && imm
.isUInt6())
1200 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1
, imm
.getUInt6() >> 1, rn
, rt
);
1202 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1205 // If index is set, this is a regular offset or a pre-indexed load;
// if index is not set then this is a post-index load.
1208 // If wback is set rn is updated - this is a pre or post index load,
1209 // if wback is not set this is a regular offset memory access.
1211 // (-255 <= offset <= 255)
1213 // _tmp = _reg + offset
1214 // MEM[index ? _tmp : _reg] = REG[rt]
1215 // if (wback) REG[rn] = _tmp
1216 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1218 ASSERT(rt
!= ARMRegisters::pc
);
1219 ASSERT(rn
!= ARMRegisters::pc
);
1220 ASSERT(index
|| wback
);
1221 ASSERT(!wback
| (rt
!= rn
));
1228 ASSERT((offset
& ~0xff) == 0);
1230 offset
|= (wback
<< 8);
1231 offset
|= (add
<< 9);
1232 offset
|= (index
<< 10);
1233 offset
|= (1 << 11);
1235 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3
, rn
, rt
, offset
);
1238 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1240 ASSERT(!BadReg(rt
)); // Memory hint
1241 ASSERT(rn
!= ARMRegisters::pc
); // LDRH (literal)
1242 ASSERT(!BadReg(rm
));
1245 if (!shift
&& !((rt
| rn
| rm
) & 8))
1246 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1
, rm
, rn
, rt
);
1248 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1251 void ldrb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1253 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1254 ASSERT(imm
.isUInt12());
1256 if (!((rt
| rn
) & 8) && imm
.isUInt5())
1257 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1
, imm
.getUInt5(), rn
, rt
);
1259 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1262 void ldrb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1264 ASSERT(rt
!= ARMRegisters::pc
);
1265 ASSERT(rn
!= ARMRegisters::pc
);
1266 ASSERT(index
|| wback
);
1267 ASSERT(!wback
| (rt
!= rn
));
1275 ASSERT(!(offset
& ~0xff));
1277 offset
|= (wback
<< 8);
1278 offset
|= (add
<< 9);
1279 offset
|= (index
<< 10);
1280 offset
|= (1 << 11);
1282 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3
, rn
, rt
, offset
);
1285 ALWAYS_INLINE
void ldrb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1287 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1288 ASSERT(!BadReg(rm
));
1291 if (!shift
&& !((rt
| rn
| rm
) & 8))
1292 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1
, rm
, rn
, rt
);
1294 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1297 void ldrsb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1299 ASSERT(rn
!= ARMRegisters::pc
);
1300 ASSERT(!BadReg(rm
));
1303 if (!shift
&& !((rt
| rn
| rm
) & 8))
1304 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1
, rm
, rn
, rt
);
1306 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1309 void ldrsh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1311 ASSERT(rn
!= ARMRegisters::pc
);
1312 ASSERT(!BadReg(rm
));
1315 if (!shift
&& !((rt
| rn
| rm
) & 8))
1316 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1
, rm
, rn
, rt
);
1318 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1321 void lsl(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1323 ASSERT(!BadReg(rd
));
1324 ASSERT(!BadReg(rm
));
1325 ShiftTypeAndAmount
shift(SRType_LSL
, shiftAmount
);
1326 m_formatter
.twoWordOp16FourFours(OP_LSL_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1329 ALWAYS_INLINE
void lsl(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1331 ASSERT(!BadReg(rd
));
1332 ASSERT(!BadReg(rn
));
1333 ASSERT(!BadReg(rm
));
1334 m_formatter
.twoWordOp12Reg4FourFours(OP_LSL_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1337 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1339 ASSERT(!BadReg(rd
));
1340 ASSERT(!BadReg(rm
));
1341 ShiftTypeAndAmount
shift(SRType_LSR
, shiftAmount
);
1342 m_formatter
.twoWordOp16FourFours(OP_LSR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1345 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1347 ASSERT(!BadReg(rd
));
1348 ASSERT(!BadReg(rn
));
1349 ASSERT(!BadReg(rm
));
1350 m_formatter
.twoWordOp12Reg4FourFours(OP_LSR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1353 ALWAYS_INLINE
void movT3(RegisterID rd
, ARMThumbImmediate imm
)
1355 ASSERT(imm
.isValid());
1356 ASSERT(!imm
.isEncodedImm());
1357 ASSERT(!BadReg(rd
));
1359 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3
, imm
.m_value
.imm4
, rd
, imm
);
1363 static void revertJumpTo_movT3movtcmpT2(void* instructionStart
, RegisterID left
, RegisterID right
, uintptr_t imm
)
1365 uint16_t* address
= static_cast<uint16_t*>(instructionStart
);
1366 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm
));
1367 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm
>> 16));
1368 address
[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
1369 address
[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right
, lo16
);
1370 address
[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
1371 address
[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right
, hi16
);
1372 address
[4] = OP_CMP_reg_T2
| left
;
1373 cacheFlush(address
, sizeof(uint16_t) * 5);
1376 static void revertJumpTo_movT3(void* instructionStart
, RegisterID rd
, ARMThumbImmediate imm
)
1378 ASSERT(imm
.isValid());
1379 ASSERT(!imm
.isEncodedImm());
1380 ASSERT(!BadReg(rd
));
1382 uint16_t* address
= static_cast<uint16_t*>(instructionStart
);
1383 address
[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, imm
);
1384 address
[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, imm
);
1385 cacheFlush(address
, sizeof(uint16_t) * 2);
1389 ALWAYS_INLINE
void mov(RegisterID rd
, ARMThumbImmediate imm
)
1391 ASSERT(imm
.isValid());
1392 ASSERT(!BadReg(rd
));
1394 if ((rd
< 8) && imm
.isUInt8())
1395 m_formatter
.oneWordOp5Reg3Imm8(OP_MOV_imm_T1
, rd
, imm
.getUInt8());
1396 else if (imm
.isEncodedImm())
1397 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2
, 0xf, rd
, imm
);
1402 ALWAYS_INLINE
void mov(RegisterID rd
, RegisterID rm
)
1404 m_formatter
.oneWordOp8RegReg143(OP_MOV_reg_T1
, rm
, rd
);
1407 ALWAYS_INLINE
void movt(RegisterID rd
, ARMThumbImmediate imm
)
1409 ASSERT(imm
.isUInt16());
1410 ASSERT(!BadReg(rd
));
1411 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT
, imm
.m_value
.imm4
, rd
, imm
);
1414 ALWAYS_INLINE
void mvn(RegisterID rd
, ARMThumbImmediate imm
)
1416 ASSERT(imm
.isEncodedImm());
1417 ASSERT(!BadReg(rd
));
1419 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm
, 0xf, rd
, imm
);
1422 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
, ShiftTypeAndAmount shift
)
1424 ASSERT(!BadReg(rd
));
1425 ASSERT(!BadReg(rm
));
1426 m_formatter
.twoWordOp16FourFours(OP_MVN_reg_T2
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1429 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
)
1431 if (!((rd
| rm
) & 8))
1432 m_formatter
.oneWordOp10Reg3Reg3(OP_MVN_reg_T1
, rm
, rd
);
1434 mvn(rd
, rm
, ShiftTypeAndAmount());
1437 ALWAYS_INLINE
void neg(RegisterID rd
, RegisterID rm
)
1439 ARMThumbImmediate zero
= ARMThumbImmediate::makeUInt12(0);
1443 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1445 ASSERT(!BadReg(rd
));
1446 ASSERT(!BadReg(rn
));
1447 ASSERT(imm
.isEncodedImm());
1448 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1
, rn
, rd
, imm
);
1451 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1453 ASSERT(!BadReg(rd
));
1454 ASSERT(!BadReg(rn
));
1455 ASSERT(!BadReg(rm
));
1456 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1459 void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1461 if ((rd
== rn
) && !((rd
| rm
) & 8))
1462 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1463 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1464 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1466 orr(rd
, rn
, rm
, ShiftTypeAndAmount());
1469 ALWAYS_INLINE
void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1471 ASSERT(!BadReg(rd
));
1472 ASSERT(!BadReg(rn
));
1473 ASSERT(!BadReg(rm
));
1474 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1477 void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1479 if ((rd
== rn
) && !((rd
| rm
) & 8))
1480 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1481 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1482 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1484 orr_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1487 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1489 ASSERT(!BadReg(rd
));
1490 ASSERT(!BadReg(rm
));
1491 ShiftTypeAndAmount
shift(SRType_ROR
, shiftAmount
);
1492 m_formatter
.twoWordOp16FourFours(OP_ROR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1495 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1497 ASSERT(!BadReg(rd
));
1498 ASSERT(!BadReg(rn
));
1499 ASSERT(!BadReg(rm
));
1500 m_formatter
.twoWordOp12Reg4FourFours(OP_ROR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1503 ALWAYS_INLINE
void pop(RegisterID dest
)
1505 if (dest
< ARMRegisters::r8
)
1506 m_formatter
.oneWordOp7Imm9(OP_POP_T1
, 1 << dest
);
1508 // Load postindexed with writeback.
1509 ldr(dest
, ARMRegisters::sp
, sizeof(void*), false, true);
1513 ALWAYS_INLINE
void pop(uint32_t registerList
)
1515 ASSERT(WTF::bitCount(registerList
) > 1);
1516 ASSERT(!((1 << ARMRegisters::pc
) & registerList
) || !((1 << ARMRegisters::lr
) & registerList
));
1517 ASSERT(!((1 << ARMRegisters::sp
) & registerList
));
1518 m_formatter
.twoWordOp16Imm16(OP_POP_T2
, registerList
);
1521 ALWAYS_INLINE
void push(RegisterID src
)
1523 if (src
< ARMRegisters::r8
)
1524 m_formatter
.oneWordOp7Imm9(OP_PUSH_T1
, 1 << src
);
1525 else if (src
== ARMRegisters::lr
)
1526 m_formatter
.oneWordOp7Imm9(OP_PUSH_T1
, 0x100);
1528 // Store preindexed with writeback.
1529 str(src
, ARMRegisters::sp
, -sizeof(void*), true, true);
1533 ALWAYS_INLINE
void push(uint32_t registerList
)
1535 ASSERT(WTF::bitCount(registerList
) > 1);
1536 ASSERT(!((1 << ARMRegisters::pc
) & registerList
));
1537 ASSERT(!((1 << ARMRegisters::sp
) & registerList
));
1538 m_formatter
.twoWordOp16Imm16(OP_PUSH_T2
, registerList
);
1541 #if CPU(APPLE_ARMV7S)
1542 template<int datasize
>
1543 ALWAYS_INLINE
void sdiv(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1545 static_assert(datasize
== 32, "sdiv datasize must be 32 for armv7s");
1546 ASSERT(!BadReg(rd
));
1547 ASSERT(!BadReg(rn
));
1548 ASSERT(!BadReg(rm
));
1549 m_formatter
.twoWordOp12Reg4FourFours(OP_SDIV_T1
, rn
, FourFours(0xf, rd
, 0xf, rm
));
1553 ALWAYS_INLINE
void smull(RegisterID rdLo
, RegisterID rdHi
, RegisterID rn
, RegisterID rm
)
1555 ASSERT(!BadReg(rdLo
));
1556 ASSERT(!BadReg(rdHi
));
1557 ASSERT(!BadReg(rn
));
1558 ASSERT(!BadReg(rm
));
1559 ASSERT(rdLo
!= rdHi
);
1560 m_formatter
.twoWordOp12Reg4FourFours(OP_SMULL_T1
, rn
, FourFours(rdLo
, rdHi
, 0, rm
));
1563 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1564 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1566 ASSERT(rt
!= ARMRegisters::pc
);
1567 ASSERT(rn
!= ARMRegisters::pc
);
1568 ASSERT(imm
.isUInt12());
1570 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1571 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1572 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1573 m_formatter
.oneWordOp5Reg3Imm8(OP_STR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1575 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3
, rn
, rt
, imm
.getUInt12());
1578 // If index is set, this is a regular offset or a pre-indexed store;
1579 // if index is not set then is is a post-index store.
1581 // If wback is set rn is updated - this is a pre or post index store,
1582 // if wback is not set this is a regular offset memory access.
1584 // (-255 <= offset <= 255)
1586 // _tmp = _reg + offset
1587 // MEM[index ? _tmp : _reg] = REG[rt]
1588 // if (wback) REG[rn] = _tmp
1589 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1591 ASSERT(rt
!= ARMRegisters::pc
);
1592 ASSERT(rn
!= ARMRegisters::pc
);
1593 ASSERT(index
|| wback
);
1594 ASSERT(!wback
| (rt
!= rn
));
1601 ASSERT((offset
& ~0xff) == 0);
1603 offset
|= (wback
<< 8);
1604 offset
|= (add
<< 9);
1605 offset
|= (index
<< 10);
1606 offset
|= (1 << 11);
1608 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4
, rn
, rt
, offset
);
1611 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1612 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1614 ASSERT(rn
!= ARMRegisters::pc
);
1615 ASSERT(!BadReg(rm
));
1618 if (!shift
&& !((rt
| rn
| rm
) & 8))
1619 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1
, rm
, rn
, rt
);
1621 m_formatter
.twoWordOp12Reg4FourFours(OP_STR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1624 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1625 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1627 ASSERT(rt
!= ARMRegisters::pc
);
1628 ASSERT(rn
!= ARMRegisters::pc
);
1629 ASSERT(imm
.isUInt12());
1631 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1632 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1634 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1637 // If index is set, this is a regular offset or a pre-indexed store;
1638 // if index is not set then is is a post-index store.
1640 // If wback is set rn is updated - this is a pre or post index store,
1641 // if wback is not set this is a regular offset memory access.
1643 // (-255 <= offset <= 255)
1645 // _tmp = _reg + offset
1646 // MEM[index ? _tmp : _reg] = REG[rt]
1647 // if (wback) REG[rn] = _tmp
1648 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1650 ASSERT(rt
!= ARMRegisters::pc
);
1651 ASSERT(rn
!= ARMRegisters::pc
);
1652 ASSERT(index
|| wback
);
1653 ASSERT(!wback
| (rt
!= rn
));
1660 ASSERT((offset
& ~0xff) == 0);
1662 offset
|= (wback
<< 8);
1663 offset
|= (add
<< 9);
1664 offset
|= (index
<< 10);
1665 offset
|= (1 << 11);
1667 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3
, rn
, rt
, offset
);
1670 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1671 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1673 ASSERT(rn
!= ARMRegisters::pc
);
1674 ASSERT(!BadReg(rm
));
1677 if (!shift
&& !((rt
| rn
| rm
) & 8))
1678 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1
, rm
, rn
, rt
);
1680 m_formatter
.twoWordOp12Reg4FourFours(OP_STRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1683 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1684 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1686 ASSERT(rt
!= ARMRegisters::pc
);
1687 ASSERT(rn
!= ARMRegisters::pc
);
1688 ASSERT(imm
.isUInt12());
1690 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1691 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1693 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1696 // If index is set, this is a regular offset or a pre-indexed store;
1697 // if index is not set then is is a post-index store.
1699 // If wback is set rn is updated - this is a pre or post index store,
1700 // if wback is not set this is a regular offset memory access.
1702 // (-255 <= offset <= 255)
1704 // _tmp = _reg + offset
1705 // MEM[index ? _tmp : _reg] = REG[rt]
1706 // if (wback) REG[rn] = _tmp
1707 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1709 ASSERT(rt
!= ARMRegisters::pc
);
1710 ASSERT(rn
!= ARMRegisters::pc
);
1711 ASSERT(index
|| wback
);
1712 ASSERT(!wback
| (rt
!= rn
));
1719 ASSERT(!(offset
& ~0xff));
1721 offset
|= (wback
<< 8);
1722 offset
|= (add
<< 9);
1723 offset
|= (index
<< 10);
1724 offset
|= (1 << 11);
1726 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3
, rn
, rt
, offset
);
1729 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1730 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1732 ASSERT(rn
!= ARMRegisters::pc
);
1733 ASSERT(!BadReg(rm
));
1736 if (!shift
&& !((rt
| rn
| rm
) & 8))
1737 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1
, rm
, rn
, rt
);
1739 m_formatter
.twoWordOp12Reg4FourFours(OP_STRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1742 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1744 // Rd can only be SP if Rn is also SP.
1745 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1746 ASSERT(rd
!= ARMRegisters::pc
);
1747 ASSERT(rn
!= ARMRegisters::pc
);
1748 ASSERT(imm
.isValid());
1750 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1751 ASSERT(!(imm
.getUInt16() & 3));
1752 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1754 } else if (!((rd
| rn
) & 8)) {
1755 if (imm
.isUInt3()) {
1756 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1758 } else if ((rd
== rn
) && imm
.isUInt8()) {
1759 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1764 if (imm
.isEncodedImm())
1765 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3
, rn
, rd
, imm
);
1767 ASSERT(imm
.isUInt12());
1768 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4
, rn
, rd
, imm
);
1772 ALWAYS_INLINE
void sub(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1774 ASSERT(rd
!= ARMRegisters::pc
);
1775 ASSERT(rn
!= ARMRegisters::pc
);
1776 ASSERT(imm
.isValid());
1777 ASSERT(imm
.isUInt12());
1779 if (!((rd
| rn
) & 8) && !imm
.getUInt12())
1780 m_formatter
.oneWordOp10Reg3Reg3(OP_RSB_imm_T1
, rn
, rd
);
1782 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2
, rn
, rd
, imm
);
1785 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1787 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1788 ASSERT(rd
!= ARMRegisters::pc
);
1789 ASSERT(rn
!= ARMRegisters::pc
);
1790 ASSERT(!BadReg(rm
));
1791 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1794 // NOTE: In an IT block, add doesn't modify the flags register.
1795 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1797 if (!((rd
| rn
| rm
) & 8))
1798 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1800 sub(rd
, rn
, rm
, ShiftTypeAndAmount());
1803 // Not allowed in an IT (if then) block.
1804 void sub_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1806 // Rd can only be SP if Rn is also SP.
1807 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1808 ASSERT(rd
!= ARMRegisters::pc
);
1809 ASSERT(rn
!= ARMRegisters::pc
);
1810 ASSERT(imm
.isValid());
1812 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1813 ASSERT(!(imm
.getUInt16() & 3));
1814 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1816 } else if (!((rd
| rn
) & 8)) {
1817 if (imm
.isUInt3()) {
1818 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1820 } else if ((rd
== rn
) && imm
.isUInt8()) {
1821 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1826 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3
, rn
, rd
, imm
);
1829 ALWAYS_INLINE
void sub_S(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1831 ASSERT(rd
!= ARMRegisters::pc
);
1832 ASSERT(rn
!= ARMRegisters::pc
);
1833 ASSERT(imm
.isValid());
1834 ASSERT(imm
.isUInt12());
1836 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2
, rn
, rd
, imm
);
1839 // Not allowed in an IT (if then) block?
1840 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1842 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1843 ASSERT(rd
!= ARMRegisters::pc
);
1844 ASSERT(rn
!= ARMRegisters::pc
);
1845 ASSERT(!BadReg(rm
));
1846 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1849 // Not allowed in an IT (if then) block.
1850 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1852 if (!((rd
| rn
| rm
) & 8))
1853 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1855 sub_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1858 ALWAYS_INLINE
void tst(RegisterID rn
, ARMThumbImmediate imm
)
1860 ASSERT(!BadReg(rn
));
1861 ASSERT(imm
.isEncodedImm());
1863 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm
, rn
, (RegisterID
)0xf, imm
);
1866 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1868 ASSERT(!BadReg(rn
));
1869 ASSERT(!BadReg(rm
));
1870 m_formatter
.twoWordOp12Reg4FourFours(OP_TST_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
1873 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
)
1876 tst(rn
, rm
, ShiftTypeAndAmount());
1878 m_formatter
.oneWordOp10Reg3Reg3(OP_TST_reg_T1
, rm
, rn
);
1881 ALWAYS_INLINE
void ubfx(RegisterID rd
, RegisterID rn
, unsigned lsb
, unsigned width
)
1884 ASSERT((width
>= 1) && (width
<= 32));
1885 ASSERT((lsb
+ width
) <= 32);
1886 m_formatter
.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1
, rd
, rn
, (lsb
& 0x1c) << 10, (lsb
& 0x3) << 6, (width
- 1) & 0x1f);
1889 #if CPU(APPLE_ARMV7S)
1890 ALWAYS_INLINE
void udiv(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1892 ASSERT(!BadReg(rd
));
1893 ASSERT(!BadReg(rn
));
1894 ASSERT(!BadReg(rm
));
1895 m_formatter
.twoWordOp12Reg4FourFours(OP_UDIV_T1
, rn
, FourFours(0xf, rd
, 0xf, rm
));
1899 void vadd(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1901 m_formatter
.vfpOp(OP_VADD_T2
, OP_VADD_T2b
, true, rn
, rd
, rm
);
1904 void vcmp(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1906 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(4), rd
, rm
);
1909 void vcmpz(FPDoubleRegisterID rd
)
1911 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(5), rd
, VFPOperand(0));
1914 void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
1916 // boolean values are 64bit (toInt, unsigned, roundZero)
1917 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(false, false, false), rd
, rm
);
1920 void vcvt_floatingPointToSigned(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1922 // boolean values are 64bit (toInt, unsigned, roundZero)
1923 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, false, true), rd
, rm
);
1926 void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1928 // boolean values are 64bit (toInt, unsigned, roundZero)
1929 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, true, true), rd
, rm
);
1932 void vdiv(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1934 m_formatter
.vfpOp(OP_VDIV
, OP_VDIVb
, true, rn
, rd
, rm
);
1937 void vldr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1939 m_formatter
.vfpMemOp(OP_VLDR
, OP_VLDRb
, true, rn
, rd
, imm
);
1942 void flds(FPSingleRegisterID rd
, RegisterID rn
, int32_t imm
)
1944 m_formatter
.vfpMemOp(OP_FLDS
, OP_FLDSb
, false, rn
, rd
, imm
);
1947 void vmov(RegisterID rd
, FPSingleRegisterID rn
)
1949 ASSERT(!BadReg(rd
));
1950 m_formatter
.vfpOp(OP_VMOV_StoC
, OP_VMOV_StoCb
, false, rn
, rd
, VFPOperand(0));
1953 void vmov(FPSingleRegisterID rd
, RegisterID rn
)
1955 ASSERT(!BadReg(rn
));
1956 m_formatter
.vfpOp(OP_VMOV_CtoS
, OP_VMOV_CtoSb
, false, rd
, rn
, VFPOperand(0));
1959 void vmov(RegisterID rd1
, RegisterID rd2
, FPDoubleRegisterID rn
)
1961 ASSERT(!BadReg(rd1
));
1962 ASSERT(!BadReg(rd2
));
1963 m_formatter
.vfpOp(OP_VMOV_DtoC
, OP_VMOV_DtoCb
, true, rd2
, VFPOperand(rd1
| 16), rn
);
1966 void vmov(FPDoubleRegisterID rd
, RegisterID rn1
, RegisterID rn2
)
1968 ASSERT(!BadReg(rn1
));
1969 ASSERT(!BadReg(rn2
));
1970 m_formatter
.vfpOp(OP_VMOV_CtoD
, OP_VMOV_CtoDb
, true, rn2
, VFPOperand(rn1
| 16), rd
);
1973 void vmov(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
)
1975 m_formatter
.vfpOp(OP_VMOV_T2
, OP_VMOV_T2b
, true, VFPOperand(0), rd
, rn
);
1978 void vmrs(RegisterID reg
= ARMRegisters::pc
)
1980 ASSERT(reg
!= ARMRegisters::sp
);
1981 m_formatter
.vfpOp(OP_VMRS
, OP_VMRSb
, false, VFPOperand(1), VFPOperand(0x10 | reg
), VFPOperand(0));
1984 void vmul(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1986 m_formatter
.vfpOp(OP_VMUL_T2
, OP_VMUL_T2b
, true, rn
, rd
, rm
);
1989 void vstr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1991 m_formatter
.vfpMemOp(OP_VSTR
, OP_VSTRb
, true, rn
, rd
, imm
);
1994 void fsts(FPSingleRegisterID rd
, RegisterID rn
, int32_t imm
)
1996 m_formatter
.vfpMemOp(OP_FSTS
, OP_FSTSb
, false, rn
, rd
, imm
);
1999 void vsub(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
2001 m_formatter
.vfpOp(OP_VSUB_T2
, OP_VSUB_T2b
, true, rn
, rd
, rm
);
2004 void vabs(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
2006 m_formatter
.vfpOp(OP_VABS_T2
, OP_VABS_T2b
, true, VFPOperand(16), rd
, rm
);
2009 void vneg(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
2011 m_formatter
.vfpOp(OP_VNEG_T2
, OP_VNEG_T2b
, true, VFPOperand(1), rd
, rm
);
2014 void vsqrt(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
2016 m_formatter
.vfpOp(OP_VSQRT_T1
, OP_VSQRT_T1b
, true, VFPOperand(17), rd
, rm
);
2019 void vcvtds(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
2021 m_formatter
.vfpOp(OP_VCVTDS_T1
, OP_VCVTDS_T1b
, false, VFPOperand(23), rd
, rm
);
2024 void vcvtsd(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
2026 m_formatter
.vfpOp(OP_VCVTSD_T1
, OP_VCVTSD_T1b
, true, VFPOperand(23), rd
, rm
);
2031 m_formatter
.oneWordOp8Imm8(OP_NOP_T1
, 0);
2036 m_formatter
.twoWordOp16Op16(OP_NOP_T2a
, OP_NOP_T2b
);
2041 m_formatter
.twoWordOp16Op16(OP_DMB_SY_T2a
, OP_DMB_SY_T2b
);
2044 AssemblerLabel
labelIgnoringWatchpoints()
2046 return m_formatter
.label();
2049 AssemblerLabel
labelForWatchpoint()
2051 AssemblerLabel result
= m_formatter
.label();
2052 if (static_cast<int>(result
.m_offset
) != m_indexOfLastWatchpoint
)
2054 m_indexOfLastWatchpoint
= result
.m_offset
;
2055 m_indexOfTailOfLastWatchpoint
= result
.m_offset
+ maxJumpReplacementSize();
2059 AssemblerLabel
label()
2061 AssemblerLabel result
= m_formatter
.label();
2062 while (UNLIKELY(static_cast<int>(result
.m_offset
) < m_indexOfTailOfLastWatchpoint
)) {
2063 if (UNLIKELY(static_cast<int>(result
.m_offset
) + 4 <= m_indexOfTailOfLastWatchpoint
))
2067 result
= m_formatter
.label();
2072 AssemblerLabel
align(int alignment
)
2074 while (!m_formatter
.isAligned(alignment
))
2080 static void* getRelocatedAddress(void* code
, AssemblerLabel label
)
2082 ASSERT(label
.isSet());
2083 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + label
.m_offset
);
2086 static int getDifferenceBetweenLabels(AssemblerLabel a
, AssemblerLabel b
)
2088 return b
.m_offset
- a
.m_offset
;
2091 static int jumpSizeDelta(JumpType jumpType
, JumpLinkType jumpLinkType
) { return JUMP_ENUM_SIZE(jumpType
) - JUMP_ENUM_SIZE(jumpLinkType
); }
2093 // Assembler admin methods:
2095 static ALWAYS_INLINE
bool linkRecordSourceComparator(const LinkRecord
& a
, const LinkRecord
& b
)
2097 return a
.from() < b
.from();
2100 static bool canCompact(JumpType jumpType
)
2102 // The following cannot be compacted:
2103 // JumpFixed: represents custom jump sequence
2104 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
2105 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
2106 return (jumpType
== JumpNoCondition
) || (jumpType
== JumpCondition
);
2109 static JumpLinkType
computeJumpType(JumpType jumpType
, const uint8_t* from
, const uint8_t* to
)
2111 if (jumpType
== JumpFixed
)
2114 // for patchable jump we must leave space for the longest code sequence
2115 if (jumpType
== JumpNoConditionFixedSize
)
2117 if (jumpType
== JumpConditionFixedSize
)
2118 return LinkConditionalBX
;
2120 const int paddingSize
= JUMP_ENUM_SIZE(jumpType
);
2122 if (jumpType
== JumpCondition
) {
2123 // 2-byte conditional T1
2124 const uint16_t* jumpT1Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT1
)));
2125 if (canBeJumpT1(jumpT1Location
, to
))
2127 // 4-byte conditional T3
2128 const uint16_t* jumpT3Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT3
)));
2129 if (canBeJumpT3(jumpT3Location
, to
))
2131 // 4-byte conditional T4 with IT
2132 const uint16_t* conditionalJumpT4Location
=
2133 reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkConditionalJumpT4
)));
2134 if (canBeJumpT4(conditionalJumpT4Location
, to
))
2135 return LinkConditionalJumpT4
;
2137 // 2-byte unconditional T2
2138 const uint16_t* jumpT2Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT2
)));
2139 if (canBeJumpT2(jumpT2Location
, to
))
2141 // 4-byte unconditional T4
2142 const uint16_t* jumpT4Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT4
)));
2143 if (canBeJumpT4(jumpT4Location
, to
))
2145 // use long jump sequence
2149 ASSERT(jumpType
== JumpCondition
);
2150 return LinkConditionalBX
;
2153 static JumpLinkType
computeJumpType(LinkRecord
& record
, const uint8_t* from
, const uint8_t* to
)
2155 JumpLinkType linkType
= computeJumpType(record
.type(), from
, to
);
2156 record
.setLinkType(linkType
);
2160 Vector
<LinkRecord
, 0, UnsafeVectorOverflow
>& jumpsToLink()
2162 std::sort(m_jumpsToLink
.begin(), m_jumpsToLink
.end(), linkRecordSourceComparator
);
2163 return m_jumpsToLink
;
2166 static void ALWAYS_INLINE
link(LinkRecord
& record
, uint8_t* from
, uint8_t* to
)
2168 switch (record
.linkType()) {
2170 linkJumpT1(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2173 linkJumpT2(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2176 linkJumpT3(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2179 linkJumpT4(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2181 case LinkConditionalJumpT4
:
2182 linkConditionalJumpT4(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2184 case LinkConditionalBX
:
2185 linkConditionalBX(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2188 linkBX(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
2191 RELEASE_ASSERT_NOT_REACHED();
2196 void* unlinkedCode() { return m_formatter
.data(); }
2197 size_t codeSize() const { return m_formatter
.codeSize(); }
2199 static unsigned getCallReturnOffset(AssemblerLabel call
)
2201 ASSERT(call
.isSet());
2202 return call
.m_offset
;
2205 // Linking & patching:
2207 // 'link' and 'patch' methods are for use on unprotected code - such as the code
2208 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
2209 // code has been finalized it is (platform support permitting) within a non-
2210 writable region of memory; to modify the code in an execute-only executable
2211 // pool the 'repatch' and 'relink' methods should be used.
2213 void linkJump(AssemblerLabel from
, AssemblerLabel to
, JumpType type
, Condition condition
)
2216 ASSERT(from
.isSet());
2217 m_jumpsToLink
.append(LinkRecord(from
.m_offset
, to
.m_offset
, type
, condition
));
2220 static void linkJump(void* code
, AssemblerLabel from
, void* to
)
2222 ASSERT(from
.isSet());
2224 uint16_t* location
= reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
);
2225 linkJumpAbsolute(location
, to
);
2228 static void linkCall(void* code
, AssemblerLabel from
, void* to
)
2230 ASSERT(!(reinterpret_cast<intptr_t>(code
) & 1));
2231 ASSERT(from
.isSet());
2233 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
) - 1, to
, false);
2236 static void linkPointer(void* code
, AssemblerLabel where
, void* value
)
2238 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
, false);
2241 static void relinkJump(void* from
, void* to
)
2243 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
2244 ASSERT(!(reinterpret_cast<intptr_t>(to
) & 1));
2246 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from
), to
);
2248 cacheFlush(reinterpret_cast<uint16_t*>(from
) - 5, 5 * sizeof(uint16_t));
2251 static void relinkCall(void* from
, void* to
)
2253 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
2255 setPointer(reinterpret_cast<uint16_t*>(from
) - 1, to
, true);
2258 static void* readCallTarget(void* from
)
2260 return readPointer(reinterpret_cast<uint16_t*>(from
) - 1);
2263 static void repatchInt32(void* where
, int32_t value
)
2265 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
2267 setInt32(where
, value
, true);
2270 static void repatchCompact(void* where
, int32_t offset
)
2272 ASSERT(offset
>= -255 && offset
<= 255);
2280 offset
|= (add
<< 9);
2281 offset
|= (1 << 10);
2282 offset
|= (1 << 11);
2284 uint16_t* location
= reinterpret_cast<uint16_t*>(where
);
2285 location
[1] &= ~((1 << 12) - 1);
2286 location
[1] |= offset
;
2287 cacheFlush(location
, sizeof(uint16_t) * 2);
2290 static void repatchPointer(void* where
, void* value
)
2292 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
2294 setPointer(where
, value
, true);
2297 static void* readPointer(void* where
)
2299 return reinterpret_cast<void*>(readInt32(where
));
2302 static void replaceWithJump(void* instructionStart
, void* to
)
2304 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2305 ASSERT(!(bitwise_cast
<uintptr_t>(to
) & 1));
2308 if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart
), to
)) {
2309 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 2;
2310 linkJumpT4(ptr
, to
);
2311 cacheFlush(ptr
- 2, sizeof(uint16_t) * 2);
2313 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 5;
2315 cacheFlush(ptr
- 5, sizeof(uint16_t) * 5);
2318 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
) + 2;
2319 linkJumpT4(ptr
, to
);
2320 cacheFlush(ptr
- 2, sizeof(uint16_t) * 2);
2324 static ptrdiff_t maxJumpReplacementSize()
2333 static void replaceWithLoad(void* instructionStart
)
2335 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2336 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
);
2337 switch (ptr
[0] & 0xFFF0) {
2341 ASSERT(!(ptr
[1] & 0xF000));
2343 ptr
[0] |= OP_LDR_imm_T3
;
2344 ptr
[1] |= (ptr
[1] & 0x0F00) << 4;
2346 cacheFlush(ptr
, sizeof(uint16_t) * 2);
2349 RELEASE_ASSERT_NOT_REACHED();
2353 static void replaceWithAddressComputation(void* instructionStart
)
2355 ASSERT(!(bitwise_cast
<uintptr_t>(instructionStart
) & 1));
2356 uint16_t* ptr
= reinterpret_cast<uint16_t*>(instructionStart
);
2357 switch (ptr
[0] & 0xFFF0) {
2359 ASSERT(!(ptr
[1] & 0x0F00));
2361 ptr
[0] |= OP_ADD_imm_T3
;
2362 ptr
[1] |= (ptr
[1] & 0xF000) >> 4;
2364 cacheFlush(ptr
, sizeof(uint16_t) * 2);
2369 RELEASE_ASSERT_NOT_REACHED();
2373 unsigned debugOffset() { return m_formatter
.debugOffset(); }
2376 static inline void linuxPageFlush(uintptr_t begin
, uintptr_t end
)
2388 : "r" (begin
), "r" (end
)
2389 : "r0", "r1", "r2");
2393 static void cacheFlush(void* code
, size_t size
)
2396 sys_cache_control(kCacheFunctionPrepareForExecution
, code
, size
);
2398 size_t page
= pageSize();
2399 uintptr_t current
= reinterpret_cast<uintptr_t>(code
);
2400 uintptr_t end
= current
+ size
;
2401 uintptr_t firstPageEnd
= (current
& ~(page
- 1)) + page
;
2403 if (end
<= firstPageEnd
) {
2404 linuxPageFlush(current
, end
);
2408 linuxPageFlush(current
, firstPageEnd
);
2410 for (current
= firstPageEnd
; current
+ page
< end
; current
+= page
)
2411 linuxPageFlush(current
, current
+ page
);
2413 linuxPageFlush(current
, end
);
2415 CacheRangeFlush(code
, size
, CACHE_SYNC_ALL
);
2417 #error "The cacheFlush support is missing on this platform."
2422 // VFP operations commonly take one or more 5-bit operands, typically representing a
2423 // floating point register number. This will commonly be encoded in the instruction
2424 // in two parts, with one single bit field, and one 4-bit field. In the case of
2425 // double precision operands the high bit of the register number will be encoded
2426 // separately, and for single precision operands the high bit of the register number
2427 // will be encoded individually.
2428 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2429 // field to be encoded together in the instruction (the low 4-bits of a double
2430 // register number, or the high 4-bits of a single register number), and bit 4
2431 // contains the bit value to be encoded individually.
2433 explicit VFPOperand(uint32_t value
)
2436 ASSERT(!(m_value
& ~0x1f));
2439 VFPOperand(FPDoubleRegisterID reg
)
2444 VFPOperand(RegisterID reg
)
2449 VFPOperand(FPSingleRegisterID reg
)
2450 : m_value(((reg
& 1) << 4) | (reg
>> 1)) // rotate the lowest bit of 'reg' to the top.
2456 return m_value
>> 4;
2461 return m_value
& 0xf;
2467 VFPOperand
vcvtOp(bool toInteger
, bool isUnsigned
, bool isRoundZero
)
2469 // Cannot specify rounding when converting to float.
2470 ASSERT(toInteger
|| !isRoundZero
);
2474 // opc2 indicates both toInteger & isUnsigned.
2475 op
|= isUnsigned
? 0x4 : 0x5;
2476 // 'op' field in instruction is isRoundZero
2480 ASSERT(!isRoundZero
);
2481 // 'op' field in instruction is isUnsigned
2485 return VFPOperand(op
);
2488 static void setInt32(void* code
, uint32_t value
, bool flush
)
2490 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2491 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
2493 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
));
2494 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
>> 16));
2495 location
[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2496 location
[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-3] >> 8) & 0xf, lo16
);
2497 location
[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2498 location
[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-1] >> 8) & 0xf, hi16
);
2501 cacheFlush(location
- 4, 4 * sizeof(uint16_t));
2504 static int32_t readInt32(void* code
)
2506 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2507 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
2509 ARMThumbImmediate lo16
;
2510 ARMThumbImmediate hi16
;
2511 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16
, location
[-4]);
2512 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16
, location
[-3]);
2513 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16
, location
[-2]);
2514 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16
, location
[-1]);
2515 uint32_t result
= hi16
.asUInt16();
2517 result
|= lo16
.asUInt16();
2518 return static_cast<int32_t>(result
);
2521 static void setUInt7ForLoad(void* code
, ARMThumbImmediate imm
)
2523 // Requires us to have planted a LDR_imm_T1
2524 ASSERT(imm
.isValid());
2525 ASSERT(imm
.isUInt7());
2526 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2527 location
[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2528 location
[0] |= (imm
.getUInt7() >> 2) << 6;
2529 cacheFlush(location
, sizeof(uint16_t));
2532 static void setPointer(void* code
, void* value
, bool flush
)
2534 setInt32(code
, reinterpret_cast<uint32_t>(value
), flush
);
2537 static bool isB(void* address
)
2539 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2540 return ((instruction
[0] & 0xf800) == OP_B_T4a
) && ((instruction
[1] & 0xd000) == OP_B_T4b
);
2543 static bool isBX(void* address
)
2545 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2546 return (instruction
[0] & 0xff87) == OP_BX
;
2549 static bool isMOV_imm_T3(void* address
)
2551 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2552 return ((instruction
[0] & 0xFBF0) == OP_MOV_imm_T3
) && ((instruction
[1] & 0x8000) == 0);
2555 static bool isMOVT(void* address
)
2557 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2558 return ((instruction
[0] & 0xFBF0) == OP_MOVT
) && ((instruction
[1] & 0x8000) == 0);
2561 static bool isNOP_T1(void* address
)
2563 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2564 return instruction
[0] == OP_NOP_T1
;
2567 static bool isNOP_T2(void* address
)
2569 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2570 return (instruction
[0] == OP_NOP_T2a
) && (instruction
[1] == OP_NOP_T2b
);
2573 static bool canBeJumpT1(const uint16_t* instruction
, const void* target
)
2575 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2576 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2578 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2579 // It does not appear to be documented in the ARM ARM (big surprise), but
2580 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2581 // less than the actual displacement.
2583 return ((relative
<< 23) >> 23) == relative
;
2586 static bool canBeJumpT2(const uint16_t* instruction
, const void* target
)
2588 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2589 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2591 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2592 // It does not appear to be documented in the ARM ARM (big surprise), but
2593 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2594 // less than the actual displacement.
2596 return ((relative
<< 20) >> 20) == relative
;
2599 static bool canBeJumpT3(const uint16_t* instruction
, const void* target
)
2601 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2602 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2604 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2605 return ((relative
<< 11) >> 11) == relative
;
2608 static bool canBeJumpT4(const uint16_t* instruction
, const void* target
)
2610 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2611 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2613 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2614 return ((relative
<< 7) >> 7) == relative
;
2617 static void linkJumpT1(Condition cond
, uint16_t* instruction
, void* target
)
2619 // FIMXE: this should be up in the MacroAssembler layer. :-(
2620 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2621 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2622 ASSERT(canBeJumpT1(instruction
, target
));
2624 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2625 // It does not appear to be documented in the ARM ARM (big surprise), but
2626 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2627 // less than the actual displacement.
2630 // All branch offsets should be an even distance.
2631 ASSERT(!(relative
& 1));
2632 instruction
[-1] = OP_B_T1
| ((cond
& 0xf) << 8) | ((relative
& 0x1fe) >> 1);
2635 static void linkJumpT2(uint16_t* instruction
, void* target
)
2637 // FIMXE: this should be up in the MacroAssembler layer. :-(
2638 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2639 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2640 ASSERT(canBeJumpT2(instruction
, target
));
2642 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2643 // It does not appear to be documented in the ARM ARM (big surprise), but
2644 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2645 // less than the actual displacement.
2648 // All branch offsets should be an even distance.
2649 ASSERT(!(relative
& 1));
2650 instruction
[-1] = OP_B_T2
| ((relative
& 0xffe) >> 1);
2653 static void linkJumpT3(Condition cond
, uint16_t* instruction
, void* target
)
2655 // FIMXE: this should be up in the MacroAssembler layer. :-(
2656 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2657 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2658 ASSERT(canBeJumpT3(instruction
, target
));
2660 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2662 // All branch offsets should be an even distance.
2663 ASSERT(!(relative
& 1));
2664 instruction
[-2] = OP_B_T3a
| ((relative
& 0x100000) >> 10) | ((cond
& 0xf) << 6) | ((relative
& 0x3f000) >> 12);
2665 instruction
[-1] = OP_B_T3b
| ((relative
& 0x80000) >> 8) | ((relative
& 0x40000) >> 5) | ((relative
& 0xffe) >> 1);
2668 static void linkJumpT4(uint16_t* instruction
, void* target
)
2670 // FIMXE: this should be up in the MacroAssembler layer. :-(
2671 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2672 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2673 ASSERT(canBeJumpT4(instruction
, target
));
2675 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2676 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2678 relative
^= 0xC00000;
2680 // All branch offsets should be an even distance.
2681 ASSERT(!(relative
& 1));
2682 instruction
[-2] = OP_B_T4a
| ((relative
& 0x1000000) >> 14) | ((relative
& 0x3ff000) >> 12);
2683 instruction
[-1] = OP_B_T4b
| ((relative
& 0x800000) >> 10) | ((relative
& 0x400000) >> 11) | ((relative
& 0xffe) >> 1);
2686 static void linkConditionalJumpT4(Condition cond
, uint16_t* instruction
, void* target
)
2688 // FIMXE: this should be up in the MacroAssembler layer. :-(
2689 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2690 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2692 instruction
[-3] = ifThenElse(cond
) | OP_IT
;
2693 linkJumpT4(instruction
, target
);
2696 static void linkBX(uint16_t* instruction
, void* target
)
2698 // FIMXE: this should be up in the MacroAssembler layer. :-(
2699 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2700 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2702 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2703 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2704 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2705 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2706 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2707 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2708 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2709 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2712 static void linkConditionalBX(Condition cond
, uint16_t* instruction
, void* target
)
2714 // FIMXE: this should be up in the MacroAssembler layer. :-(
2715 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2716 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2718 linkBX(instruction
, target
);
2719 instruction
[-6] = ifThenElse(cond
, true, true) | OP_IT
;
2722 static void linkJumpAbsolute(uint16_t* instruction
, void* target
)
2724 // FIMXE: this should be up in the MacroAssembler layer. :-(
2725 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2726 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2728 ASSERT((isMOV_imm_T3(instruction
- 5) && isMOVT(instruction
- 3) && isBX(instruction
- 1))
2729 || (isNOP_T1(instruction
- 5) && isNOP_T2(instruction
- 4) && isB(instruction
- 2)));
2731 if (canBeJumpT4(instruction
, target
)) {
2732 // There may be a better way to fix this, but right now put the NOPs first, since in the
2733 // case of an conditional branch this will be coming after an ITTT predicating *three*
2734 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2735 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2736 // actually be the second half of a 2-word op.
2737 instruction
[-5] = OP_NOP_T1
;
2738 instruction
[-4] = OP_NOP_T2a
;
2739 instruction
[-3] = OP_NOP_T2b
;
2740 linkJumpT4(instruction
, target
);
2742 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2743 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2744 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2745 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2746 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2747 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2748 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2749 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2753 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op
, ARMThumbImmediate imm
)
2755 return op
| (imm
.m_value
.i
<< 10) | imm
.m_value
.imm4
;
2758 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate
& result
, uint16_t value
)
2760 result
.m_value
.i
= (value
>> 10) & 1;
2761 result
.m_value
.imm4
= value
& 15;
2764 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd
, ARMThumbImmediate imm
)
2766 return (imm
.m_value
.imm3
<< 12) | (rd
<< 8) | imm
.m_value
.imm8
;
2769 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate
& result
, uint16_t value
)
2771 result
.m_value
.imm3
= (value
>> 12) & 7;
2772 result
.m_value
.imm8
= value
& 255;
2775 class ARMInstructionFormatter
{
2777 ALWAYS_INLINE
void oneWordOp5Reg3Imm8(OpcodeID op
, RegisterID rd
, uint8_t imm
)
2779 m_buffer
.putShort(op
| (rd
<< 8) | imm
);
2782 ALWAYS_INLINE
void oneWordOp5Imm5Reg3Reg3(OpcodeID op
, uint8_t imm
, RegisterID reg1
, RegisterID reg2
)
2784 m_buffer
.putShort(op
| (imm
<< 6) | (reg1
<< 3) | reg2
);
2787 ALWAYS_INLINE
void oneWordOp7Reg3Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
, RegisterID reg3
)
2789 m_buffer
.putShort(op
| (reg1
<< 6) | (reg2
<< 3) | reg3
);
2792 ALWAYS_INLINE
void oneWordOp7Imm9(OpcodeID op
, uint16_t imm
)
2794 m_buffer
.putShort(op
| imm
);
2797 ALWAYS_INLINE
void oneWordOp8Imm8(OpcodeID op
, uint8_t imm
)
2799 m_buffer
.putShort(op
| imm
);
2802 ALWAYS_INLINE
void oneWordOp8RegReg143(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2804 m_buffer
.putShort(op
| ((reg2
& 8) << 4) | (reg1
<< 3) | (reg2
& 7));
2807 ALWAYS_INLINE
void oneWordOp9Imm7(OpcodeID op
, uint8_t imm
)
2809 m_buffer
.putShort(op
| imm
);
2812 ALWAYS_INLINE
void oneWordOp10Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2814 m_buffer
.putShort(op
| (reg1
<< 3) | reg2
);
2817 ALWAYS_INLINE
void twoWordOp12Reg4FourFours(OpcodeID1 op
, RegisterID reg
, FourFours ff
)
2819 m_buffer
.putShort(op
| reg
);
2820 m_buffer
.putShort(ff
.m_u
.value
);
2823 ALWAYS_INLINE
void twoWordOp16FourFours(OpcodeID1 op
, FourFours ff
)
2825 m_buffer
.putShort(op
);
2826 m_buffer
.putShort(ff
.m_u
.value
);
2829 ALWAYS_INLINE
void twoWordOp16Op16(OpcodeID1 op1
, OpcodeID2 op2
)
2831 m_buffer
.putShort(op1
);
2832 m_buffer
.putShort(op2
);
2835 ALWAYS_INLINE
void twoWordOp16Imm16(OpcodeID1 op1
, uint16_t imm
)
2837 m_buffer
.putShort(op1
);
2838 m_buffer
.putShort(imm
);
2841 ALWAYS_INLINE
void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op
, int imm4
, RegisterID rd
, ARMThumbImmediate imm
)
2843 ARMThumbImmediate newImm
= imm
;
2844 newImm
.m_value
.imm4
= imm4
;
2846 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op
, newImm
));
2847 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, newImm
));
2850 ALWAYS_INLINE
void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm
)
2852 m_buffer
.putShort(op
| reg1
);
2853 m_buffer
.putShort((reg2
<< 12) | imm
);
2856 ALWAYS_INLINE
void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm1
, uint16_t imm2
, uint16_t imm3
)
2858 m_buffer
.putShort(op
| reg1
);
2859 m_buffer
.putShort((imm1
<< 12) | (reg2
<< 8) | (imm2
<< 6) | imm3
);
2862 // Formats up instructions of the pattern:
2863 // 111111111B11aaaa:bbbb222SA2C2cccc
2864 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2865 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2866 ALWAYS_INLINE
void vfpOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, VFPOperand a
, VFPOperand b
, VFPOperand c
)
2868 ASSERT(!(op1
& 0x004f));
2869 ASSERT(!(op2
& 0xf1af));
2870 m_buffer
.putShort(op1
| b
.bits1() << 6 | a
.bits4());
2871 m_buffer
.putShort(op2
| b
.bits4() << 12 | size
<< 8 | a
.bits1() << 7 | c
.bits1() << 5 | c
.bits4());
2874 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2875 // (i.e. +/-(0..255) 32-bit words)
2876 ALWAYS_INLINE
void vfpMemOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, RegisterID rn
, VFPOperand rd
, int32_t imm
)
2884 uint32_t offset
= imm
;
2885 ASSERT(!(offset
& ~0x3fc));
2888 m_buffer
.putShort(op1
| (up
<< 7) | rd
.bits1() << 6 | rn
);
2889 m_buffer
.putShort(op2
| rd
.bits4() << 12 | size
<< 8 | offset
);
2892 // Administrative methods:
2894 size_t codeSize() const { return m_buffer
.codeSize(); }
2895 AssemblerLabel
label() const { return m_buffer
.label(); }
2896 bool isAligned(int alignment
) const { return m_buffer
.isAligned(alignment
); }
2897 void* data() const { return m_buffer
.data(); }
2899 unsigned debugOffset() { return m_buffer
.debugOffset(); }
2901 AssemblerBuffer m_buffer
;
2904 Vector
<LinkRecord
, 0, UnsafeVectorOverflow
> m_jumpsToLink
;
2905 int m_indexOfLastWatchpoint
;
2906 int m_indexOfTailOfLastWatchpoint
;
2911 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2913 #endif // ARMAssembler_h