2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
39 namespace ARMRegisters
{
48 r7
, wr
= r7
, // thumb work register
50 r9
, sb
= r9
, // static base
51 r10
, sl
= r10
, // stack limit
52 r11
, fp
= r11
, // frame pointer
127 } FPDoubleRegisterID
;
164 inline FPSingleRegisterID
asSingle(FPDoubleRegisterID reg
)
167 return (FPSingleRegisterID
)(reg
<< 1);
170 inline FPDoubleRegisterID
asDouble(FPSingleRegisterID reg
)
173 return (FPDoubleRegisterID
)(reg
>> 1);
177 class ARMv7Assembler
;
178 class ARMThumbImmediate
{
179 friend class ARMv7Assembler
;
181 typedef uint8_t ThumbImmediateType
;
182 static const ThumbImmediateType TypeInvalid
= 0;
183 static const ThumbImmediateType TypeEncoded
= 1;
184 static const ThumbImmediateType TypeUInt16
= 2;
194 // If this is an encoded immediate, then it may describe a shift, or a pattern.
196 unsigned shiftValue7
: 7;
197 unsigned shiftAmount
: 5;
200 unsigned immediate
: 8;
201 unsigned pattern
: 4;
203 } ThumbImmediateValue
;
205 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
216 ALWAYS_INLINE
static void countLeadingZerosPartial(uint32_t& value
, int32_t& zeros
, const int N
)
218 if (value
& ~((1 << N
) - 1)) /* check for any of the top N bits (of 2N bits) are set */
219 value
>>= N
; /* if any were set, lose the bottom N */
220 else /* if none of the top N bits are set, */
221 zeros
+= N
; /* then we have identified N leading zeros */
224 static int32_t countLeadingZeros(uint32_t value
)
230 countLeadingZerosPartial(value
, zeros
, 16);
231 countLeadingZerosPartial(value
, zeros
, 8);
232 countLeadingZerosPartial(value
, zeros
, 4);
233 countLeadingZerosPartial(value
, zeros
, 2);
234 countLeadingZerosPartial(value
, zeros
, 1);
239 : m_type(TypeInvalid
)
244 ARMThumbImmediate(ThumbImmediateType type
, ThumbImmediateValue value
)
250 ARMThumbImmediate(ThumbImmediateType type
, uint16_t value
)
253 // Make sure this constructor is only reached with type TypeUInt16;
254 // this extra parameter makes the code a little clearer by making it
255 // explicit at call sites which type is being constructed
256 ASSERT_UNUSED(type
, type
== TypeUInt16
);
258 m_value
.asInt
= value
;
262 static ARMThumbImmediate
makeEncodedImm(uint32_t value
)
264 ThumbImmediateValue encoding
;
267 // okay, these are easy.
269 encoding
.immediate
= value
;
270 encoding
.pattern
= 0;
271 return ARMThumbImmediate(TypeEncoded
, encoding
);
274 int32_t leadingZeros
= countLeadingZeros(value
);
275 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
276 ASSERT(leadingZeros
< 24);
278 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
279 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
280 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
281 int32_t rightShiftAmount
= 24 - leadingZeros
;
282 if (value
== ((value
>> rightShiftAmount
) << rightShiftAmount
)) {
283 // Shift the value down to the low byte position. The assign to
284 // shiftValue7 drops the implicit top bit.
285 encoding
.shiftValue7
= value
>> rightShiftAmount
;
286 // The encoded shift amount is the magnitude of a right rotate.
287 encoding
.shiftAmount
= 8 + leadingZeros
;
288 return ARMThumbImmediate(TypeEncoded
, encoding
);
294 if ((bytes
.byte0
== bytes
.byte1
) && (bytes
.byte0
== bytes
.byte2
) && (bytes
.byte0
== bytes
.byte3
)) {
295 encoding
.immediate
= bytes
.byte0
;
296 encoding
.pattern
= 3;
297 return ARMThumbImmediate(TypeEncoded
, encoding
);
300 if ((bytes
.byte0
== bytes
.byte2
) && !(bytes
.byte1
| bytes
.byte3
)) {
301 encoding
.immediate
= bytes
.byte0
;
302 encoding
.pattern
= 1;
303 return ARMThumbImmediate(TypeEncoded
, encoding
);
306 if ((bytes
.byte1
== bytes
.byte3
) && !(bytes
.byte0
| bytes
.byte2
)) {
307 encoding
.immediate
= bytes
.byte1
;
308 encoding
.pattern
= 2;
309 return ARMThumbImmediate(TypeEncoded
, encoding
);
312 return ARMThumbImmediate();
315 static ARMThumbImmediate
makeUInt12(int32_t value
)
317 return (!(value
& 0xfffff000))
318 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
319 : ARMThumbImmediate();
322 static ARMThumbImmediate
makeUInt12OrEncodedImm(int32_t value
)
324 // If this is not a 12-bit unsigned it, try making an encoded immediate.
325 return (!(value
& 0xfffff000))
326 ? ARMThumbImmediate(TypeUInt16
, (uint16_t)value
)
327 : makeEncodedImm(value
);
330 // The 'make' methods, above, return a !isValid() value if the argument
331 // cannot be represented as the requested type. This methods is called
332 // 'get' since the argument can always be represented.
333 static ARMThumbImmediate
makeUInt16(uint16_t value
)
335 return ARMThumbImmediate(TypeUInt16
, value
);
340 return m_type
!= TypeInvalid
;
343 uint16_t asUInt16() const { return m_value
.asInt
; }
345 // These methods rely on the format of encoded byte values.
346 bool isUInt3() { return !(m_value
.asInt
& 0xfff8); }
347 bool isUInt4() { return !(m_value
.asInt
& 0xfff0); }
348 bool isUInt5() { return !(m_value
.asInt
& 0xffe0); }
349 bool isUInt6() { return !(m_value
.asInt
& 0xffc0); }
350 bool isUInt7() { return !(m_value
.asInt
& 0xff80); }
351 bool isUInt8() { return !(m_value
.asInt
& 0xff00); }
352 bool isUInt9() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfe00); }
353 bool isUInt10() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xfc00); }
354 bool isUInt12() { return (m_type
== TypeUInt16
) && !(m_value
.asInt
& 0xf000); }
355 bool isUInt16() { return m_type
== TypeUInt16
; }
356 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value
.asInt
; }
357 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value
.asInt
; }
358 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value
.asInt
; }
359 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value
.asInt
; }
360 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value
.asInt
; }
361 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value
.asInt
; }
362 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value
.asInt
; }
363 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value
.asInt
; }
364 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value
.asInt
; }
365 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value
.asInt
; }
367 bool isEncodedImm() { return m_type
== TypeEncoded
; }
370 ThumbImmediateType m_type
;
371 ThumbImmediateValue m_value
;
380 SRType_RRX
= SRType_ROR
383 class ShiftTypeAndAmount
{
384 friend class ARMv7Assembler
;
389 m_u
.type
= (ARMShiftType
)0;
393 ShiftTypeAndAmount(ARMShiftType type
, unsigned amount
)
396 m_u
.amount
= amount
& 31;
399 unsigned lo4() { return m_u
.lo4
; }
400 unsigned hi4() { return m_u
.hi4
; }
415 class ARMv7Assembler
{
417 typedef ARMRegisters::RegisterID RegisterID
;
418 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID
;
419 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID
;
420 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID
;
422 // (HS, LO, HI, LS) -> (AE, B, A, BE)
423 // (VS, VC) -> (O, NO)
427 ConditionHS
, ConditionCS
= ConditionHS
,
428 ConditionLO
, ConditionCC
= ConditionLO
,
443 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
444 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
445 enum JumpType
{ JumpFixed
= JUMP_ENUM_WITH_SIZE(0, 0),
446 JumpNoCondition
= JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
447 JumpCondition
= JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
448 JumpNoConditionFixedSize
= JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
449 JumpConditionFixedSize
= JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
452 LinkInvalid
= JUMP_ENUM_WITH_SIZE(0, 0),
453 LinkJumpT1
= JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
454 LinkJumpT2
= JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
455 LinkJumpT3
= JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
456 LinkJumpT4
= JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
457 LinkConditionalJumpT4
= JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
458 LinkBX
= JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
459 LinkConditionalBX
= JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
464 LinkRecord(intptr_t from
, intptr_t to
, JumpType type
, Condition condition
)
466 data
.realTypes
.m_from
= from
;
467 data
.realTypes
.m_to
= to
;
468 data
.realTypes
.m_type
= type
;
469 data
.realTypes
.m_linkType
= LinkInvalid
;
470 data
.realTypes
.m_condition
= condition
;
472 void operator=(const LinkRecord
& other
)
474 data
.copyTypes
.content
[0] = other
.data
.copyTypes
.content
[0];
475 data
.copyTypes
.content
[1] = other
.data
.copyTypes
.content
[1];
476 data
.copyTypes
.content
[2] = other
.data
.copyTypes
.content
[2];
478 intptr_t from() const { return data
.realTypes
.m_from
; }
479 void setFrom(intptr_t from
) { data
.realTypes
.m_from
= from
; }
480 intptr_t to() const { return data
.realTypes
.m_to
; }
481 JumpType
type() const { return data
.realTypes
.m_type
; }
482 JumpLinkType
linkType() const { return data
.realTypes
.m_linkType
; }
483 void setLinkType(JumpLinkType linkType
) { ASSERT(data
.realTypes
.m_linkType
== LinkInvalid
); data
.realTypes
.m_linkType
= linkType
; }
484 Condition
condition() const { return data
.realTypes
.m_condition
; }
488 intptr_t m_from
: 31;
491 JumpLinkType m_linkType
: 8;
492 Condition m_condition
: 16;
497 COMPILE_ASSERT(sizeof(RealTypes
) == sizeof(CopyTypes
), LinkRecordCopyStructSizeEqualsRealStruct
);
504 bool BadReg(RegisterID reg
)
506 return (reg
== ARMRegisters::sp
) || (reg
== ARMRegisters::pc
);
509 uint32_t singleRegisterMask(FPSingleRegisterID rdNum
, int highBitsShift
, int lowBitShift
)
511 uint32_t rdMask
= (rdNum
>> 1) << highBitsShift
;
513 rdMask
|= 1 << lowBitShift
;
517 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum
, int highBitShift
, int lowBitsShift
)
519 uint32_t rdMask
= (rdNum
& 0xf) << lowBitsShift
;
521 rdMask
|= 1 << highBitShift
;
526 OP_ADD_reg_T1
= 0x1800,
527 OP_SUB_reg_T1
= 0x1A00,
528 OP_ADD_imm_T1
= 0x1C00,
529 OP_SUB_imm_T1
= 0x1E00,
530 OP_MOV_imm_T1
= 0x2000,
531 OP_CMP_imm_T1
= 0x2800,
532 OP_ADD_imm_T2
= 0x3000,
533 OP_SUB_imm_T2
= 0x3800,
534 OP_AND_reg_T1
= 0x4000,
535 OP_EOR_reg_T1
= 0x4040,
536 OP_TST_reg_T1
= 0x4200,
537 OP_RSB_imm_T1
= 0x4240,
538 OP_CMP_reg_T1
= 0x4280,
539 OP_ORR_reg_T1
= 0x4300,
540 OP_MVN_reg_T1
= 0x43C0,
541 OP_ADD_reg_T2
= 0x4400,
542 OP_MOV_reg_T1
= 0x4600,
545 OP_STR_reg_T1
= 0x5000,
546 OP_STRH_reg_T1
= 0x5200,
547 OP_STRB_reg_T1
= 0x5400,
548 OP_LDRSB_reg_T1
= 0x5600,
549 OP_LDR_reg_T1
= 0x5800,
550 OP_LDRH_reg_T1
= 0x5A00,
551 OP_LDRB_reg_T1
= 0x5C00,
552 OP_LDRSH_reg_T1
= 0x5E00,
553 OP_STR_imm_T1
= 0x6000,
554 OP_LDR_imm_T1
= 0x6800,
555 OP_STRB_imm_T1
= 0x7000,
556 OP_LDRB_imm_T1
= 0x7800,
557 OP_STRH_imm_T1
= 0x8000,
558 OP_LDRH_imm_T1
= 0x8800,
559 OP_STR_imm_T2
= 0x9000,
560 OP_LDR_imm_T2
= 0x9800,
561 OP_ADD_SP_imm_T1
= 0xA800,
562 OP_ADD_SP_imm_T2
= 0xB000,
563 OP_SUB_SP_imm_T1
= 0xB080,
572 OP_AND_reg_T2
= 0xEA00,
573 OP_TST_reg_T2
= 0xEA10,
574 OP_ORR_reg_T2
= 0xEA40,
575 OP_ORR_S_reg_T2
= 0xEA50,
576 OP_ASR_imm_T1
= 0xEA4F,
577 OP_LSL_imm_T1
= 0xEA4F,
578 OP_LSR_imm_T1
= 0xEA4F,
579 OP_ROR_imm_T1
= 0xEA4F,
580 OP_MVN_reg_T2
= 0xEA6F,
581 OP_EOR_reg_T2
= 0xEA80,
582 OP_ADD_reg_T3
= 0xEB00,
583 OP_ADD_S_reg_T3
= 0xEB10,
584 OP_SUB_reg_T2
= 0xEBA0,
585 OP_SUB_S_reg_T2
= 0xEBB0,
586 OP_CMP_reg_T2
= 0xEBB0,
587 OP_VMOV_CtoD
= 0xEC00,
588 OP_VMOV_DtoC
= 0xEC10,
593 OP_VMOV_CtoS
= 0xEE00,
594 OP_VMOV_StoC
= 0xEE10,
601 OP_VCVT_FPIVFP
= 0xEEB0,
603 OP_VMOV_IMM_T2
= 0xEEB0,
606 OP_VSQRT_T1
= 0xEEB0,
607 OP_VCVTSD_T1
= 0xEEB0,
608 OP_VCVTDS_T1
= 0xEEB0,
611 OP_AND_imm_T1
= 0xF000,
613 OP_ORR_imm_T1
= 0xF040,
614 OP_MOV_imm_T2
= 0xF040,
616 OP_EOR_imm_T1
= 0xF080,
617 OP_ADD_imm_T3
= 0xF100,
618 OP_ADD_S_imm_T3
= 0xF110,
621 OP_SUB_imm_T3
= 0xF1A0,
622 OP_SUB_S_imm_T3
= 0xF1B0,
623 OP_CMP_imm_T2
= 0xF1B0,
624 OP_RSB_imm_T2
= 0xF1C0,
625 OP_RSB_S_imm_T2
= 0xF1D0,
626 OP_ADD_imm_T4
= 0xF200,
627 OP_MOV_imm_T3
= 0xF240,
628 OP_SUB_imm_T4
= 0xF2A0,
632 OP_STRB_imm_T3
= 0xF800,
633 OP_STRB_reg_T2
= 0xF800,
634 OP_LDRB_imm_T3
= 0xF810,
635 OP_LDRB_reg_T2
= 0xF810,
636 OP_STRH_imm_T3
= 0xF820,
637 OP_STRH_reg_T2
= 0xF820,
638 OP_LDRH_reg_T2
= 0xF830,
639 OP_LDRH_imm_T3
= 0xF830,
640 OP_STR_imm_T4
= 0xF840,
641 OP_STR_reg_T2
= 0xF840,
642 OP_LDR_imm_T4
= 0xF850,
643 OP_LDR_reg_T2
= 0xF850,
644 OP_STRB_imm_T2
= 0xF880,
645 OP_LDRB_imm_T2
= 0xF890,
646 OP_STRH_imm_T2
= 0xF8A0,
647 OP_LDRH_imm_T2
= 0xF8B0,
648 OP_STR_imm_T3
= 0xF8C0,
649 OP_LDR_imm_T3
= 0xF8D0,
650 OP_LDRSB_reg_T2
= 0xF910,
651 OP_LDRSH_reg_T2
= 0xF930,
652 OP_LSL_reg_T2
= 0xFA00,
653 OP_LSR_reg_T2
= 0xFA20,
654 OP_ASR_reg_T2
= 0xFA40,
655 OP_ROR_reg_T2
= 0xFA60,
657 OP_SMULL_T1
= 0xFB80,
661 OP_VADD_T2b
= 0x0A00,
665 OP_VMOV_IMM_T2b
= 0x0A00,
666 OP_VMOV_T2b
= 0x0A40,
667 OP_VMUL_T2b
= 0x0A00,
670 OP_VMOV_StoCb
= 0x0A10,
671 OP_VMOV_CtoSb
= 0x0A10,
672 OP_VMOV_DtoCb
= 0x0A10,
673 OP_VMOV_CtoDb
= 0x0A10,
675 OP_VABS_T2b
= 0x0A40,
677 OP_VCVT_FPIVFPb
= 0x0A40,
678 OP_VNEG_T2b
= 0x0A40,
679 OP_VSUB_T2b
= 0x0A40,
680 OP_VSQRT_T1b
= 0x0A40,
681 OP_VCVTSD_T1b
= 0x0A40,
682 OP_VCVTDS_T1b
= 0x0A40,
689 FourFours(unsigned f3
, unsigned f2
, unsigned f1
, unsigned f0
)
708 class ARMInstructionFormatter
;
711 bool ifThenElseConditionBit(Condition condition
, bool isIf
)
713 return isIf
? (condition
& 1) : !(condition
& 1);
715 uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
, bool inst4if
)
717 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
718 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
719 | (ifThenElseConditionBit(condition
, inst4if
) << 1)
721 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
722 return (condition
<< 4) | mask
;
724 uint8_t ifThenElse(Condition condition
, bool inst2if
, bool inst3if
)
726 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
727 | (ifThenElseConditionBit(condition
, inst3if
) << 2)
729 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
730 return (condition
<< 4) | mask
;
732 uint8_t ifThenElse(Condition condition
, bool inst2if
)
734 int mask
= (ifThenElseConditionBit(condition
, inst2if
) << 3)
736 ASSERT((condition
!= ConditionAL
) || !(mask
& (mask
- 1)));
737 return (condition
<< 4) | mask
;
740 uint8_t ifThenElse(Condition condition
)
743 return (condition
<< 4) | mask
;
748 void adc(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
750 // Rd can only be SP if Rn is also SP.
751 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
752 ASSERT(rd
!= ARMRegisters::pc
);
753 ASSERT(rn
!= ARMRegisters::pc
);
754 ASSERT(imm
.isEncodedImm());
756 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm
, rn
, rd
, imm
);
759 void add(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
761 // Rd can only be SP if Rn is also SP.
762 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
763 ASSERT(rd
!= ARMRegisters::pc
);
764 ASSERT(rn
!= ARMRegisters::pc
);
765 ASSERT(imm
.isValid());
767 if (rn
== ARMRegisters::sp
) {
768 ASSERT(!(imm
.getUInt16() & 3));
769 if (!(rd
& 8) && imm
.isUInt10()) {
770 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1
, rd
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
772 } else if ((rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
773 m_formatter
.oneWordOp9Imm7(OP_ADD_SP_imm_T2
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
776 } else if (!((rd
| rn
) & 8)) {
778 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
780 } else if ((rd
== rn
) && imm
.isUInt8()) {
781 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
786 if (imm
.isEncodedImm())
787 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3
, rn
, rd
, imm
);
789 ASSERT(imm
.isUInt12());
790 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4
, rn
, rd
, imm
);
794 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
796 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
797 ASSERT(rd
!= ARMRegisters::pc
);
798 ASSERT(rn
!= ARMRegisters::pc
);
800 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
803 // NOTE: In an IT block, add doesn't modify the flags register.
804 ALWAYS_INLINE
void add(RegisterID rd
, RegisterID rn
, RegisterID rm
)
807 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rm
, rd
);
809 m_formatter
.oneWordOp8RegReg143(OP_ADD_reg_T2
, rn
, rd
);
810 else if (!((rd
| rn
| rm
) & 8))
811 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
813 add(rd
, rn
, rm
, ShiftTypeAndAmount());
816 // Not allowed in an IT (if then) block.
817 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
819 // Rd can only be SP if Rn is also SP.
820 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
821 ASSERT(rd
!= ARMRegisters::pc
);
822 ASSERT(rn
!= ARMRegisters::pc
);
823 ASSERT(imm
.isEncodedImm());
825 if (!((rd
| rn
) & 8)) {
827 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
829 } else if ((rd
== rn
) && imm
.isUInt8()) {
830 m_formatter
.oneWordOp5Reg3Imm8(OP_ADD_imm_T2
, rd
, imm
.getUInt8());
835 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3
, rn
, rd
, imm
);
838 // Not allowed in an IT (if then) block?
839 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
841 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
842 ASSERT(rd
!= ARMRegisters::pc
);
843 ASSERT(rn
!= ARMRegisters::pc
);
845 m_formatter
.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
848 // Not allowed in an IT (if then) block.
849 ALWAYS_INLINE
void add_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
851 if (!((rd
| rn
| rm
) & 8))
852 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1
, rm
, rn
, rd
);
854 add_S(rd
, rn
, rm
, ShiftTypeAndAmount());
857 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
861 ASSERT(imm
.isEncodedImm());
862 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1
, rn
, rd
, imm
);
865 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
870 m_formatter
.twoWordOp12Reg4FourFours(OP_AND_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
873 ALWAYS_INLINE
void ARM_and(RegisterID rd
, RegisterID rn
, RegisterID rm
)
875 if ((rd
== rn
) && !((rd
| rm
) & 8))
876 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rm
, rd
);
877 else if ((rd
== rm
) && !((rd
| rn
) & 8))
878 m_formatter
.oneWordOp10Reg3Reg3(OP_AND_reg_T1
, rn
, rd
);
880 ARM_and(rd
, rn
, rm
, ShiftTypeAndAmount());
883 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
887 ShiftTypeAndAmount
shift(SRType_ASR
, shiftAmount
);
888 m_formatter
.twoWordOp16FourFours(OP_ASR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
891 ALWAYS_INLINE
void asr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
896 m_formatter
.twoWordOp12Reg4FourFours(OP_ASR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
899 // Only allowed in IT (if then) block if last instruction.
900 ALWAYS_INLINE AssemblerLabel
b()
902 m_formatter
.twoWordOp16Op16(OP_B_T4a
, OP_B_T4b
);
903 return m_formatter
.label();
906 // Only allowed in IT (if then) block if last instruction.
907 ALWAYS_INLINE AssemblerLabel
blx(RegisterID rm
)
909 ASSERT(rm
!= ARMRegisters::pc
);
910 m_formatter
.oneWordOp8RegReg143(OP_BLX
, rm
, (RegisterID
)8);
911 return m_formatter
.label();
914 // Only allowed in IT (if then) block if last instruction.
915 ALWAYS_INLINE AssemblerLabel
bx(RegisterID rm
)
917 m_formatter
.oneWordOp8RegReg143(OP_BX
, rm
, (RegisterID
)0);
918 return m_formatter
.label();
921 void bkpt(uint8_t imm
= 0)
923 m_formatter
.oneWordOp8Imm8(OP_BKPT
, imm
);
926 ALWAYS_INLINE
void clz(RegisterID rd
, RegisterID rm
)
930 m_formatter
.twoWordOp12Reg4FourFours(OP_CLZ
, rm
, FourFours(0xf, rd
, 8, rm
));
933 ALWAYS_INLINE
void cmn(RegisterID rn
, ARMThumbImmediate imm
)
935 ASSERT(rn
!= ARMRegisters::pc
);
936 ASSERT(imm
.isEncodedImm());
938 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm
, rn
, (RegisterID
)0xf, imm
);
941 ALWAYS_INLINE
void cmp(RegisterID rn
, ARMThumbImmediate imm
)
943 ASSERT(rn
!= ARMRegisters::pc
);
944 ASSERT(imm
.isEncodedImm());
946 if (!(rn
& 8) && imm
.isUInt8())
947 m_formatter
.oneWordOp5Reg3Imm8(OP_CMP_imm_T1
, rn
, imm
.getUInt8());
949 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2
, rn
, (RegisterID
)0xf, imm
);
952 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
954 ASSERT(rn
!= ARMRegisters::pc
);
956 m_formatter
.twoWordOp12Reg4FourFours(OP_CMP_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
959 ALWAYS_INLINE
void cmp(RegisterID rn
, RegisterID rm
)
962 cmp(rn
, rm
, ShiftTypeAndAmount());
964 m_formatter
.oneWordOp10Reg3Reg3(OP_CMP_reg_T1
, rm
, rn
);
967 // xor is not spelled with an 'e'. :-(
968 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
972 ASSERT(imm
.isEncodedImm());
973 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1
, rn
, rd
, imm
);
976 // xor is not spelled with an 'e'. :-(
977 ALWAYS_INLINE
void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
982 m_formatter
.twoWordOp12Reg4FourFours(OP_EOR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
985 // xor is not spelled with an 'e'. :-(
986 void eor(RegisterID rd
, RegisterID rn
, RegisterID rm
)
988 if ((rd
== rn
) && !((rd
| rm
) & 8))
989 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rm
, rd
);
990 else if ((rd
== rm
) && !((rd
| rn
) & 8))
991 m_formatter
.oneWordOp10Reg3Reg3(OP_EOR_reg_T1
, rn
, rd
);
993 eor(rd
, rn
, rm
, ShiftTypeAndAmount());
996 ALWAYS_INLINE
void it(Condition cond
)
998 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
));
1001 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
)
1003 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
));
1006 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
)
1008 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
));
1011 ALWAYS_INLINE
void it(Condition cond
, bool inst2if
, bool inst3if
, bool inst4if
)
1013 m_formatter
.oneWordOp8Imm8(OP_IT
, ifThenElse(cond
, inst2if
, inst3if
, inst4if
));
1016 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1017 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1019 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1020 ASSERT(imm
.isUInt12());
1022 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1023 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1024 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1025 m_formatter
.oneWordOp5Reg3Imm8(OP_LDR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1027 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3
, rn
, rt
, imm
.getUInt12());
1030 ALWAYS_INLINE
void ldrCompact(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1032 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1033 ASSERT(imm
.isUInt7());
1034 ASSERT(!((rt
| rn
) & 8));
1035 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1038 // If index is set, this is a regular offset or a pre-indexed load;
1039 // if index is not set then it is a post-index load.
1041 // If wback is set rn is updated - this is a pre or post index load,
1042 // if wback is not set this is a regular offset memory access.
1044 // (-255 <= offset <= 255)
1046 // _tmp = _reg + offset
1047 // MEM[index ? _tmp : _reg] = REG[rt]
1048 // if (wback) REG[rn] = _tmp
1049 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1051 ASSERT(rt
!= ARMRegisters::pc
);
1052 ASSERT(rn
!= ARMRegisters::pc
);
1053 ASSERT(index
|| wback
);
1054 ASSERT(!wback
| (rt
!= rn
));
1061 ASSERT((offset
& ~0xff) == 0);
1063 offset
|= (wback
<< 8);
1064 offset
|= (add
<< 9);
1065 offset
|= (index
<< 10);
1066 offset
|= (1 << 11);
1068 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4
, rn
, rt
, offset
);
1071 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1072 ALWAYS_INLINE
void ldr(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1074 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1075 ASSERT(!BadReg(rm
));
1078 if (!shift
&& !((rt
| rn
| rm
) & 8))
1079 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1
, rm
, rn
, rt
);
1081 m_formatter
.twoWordOp12Reg4FourFours(OP_LDR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1084 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1085 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1087 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1088 ASSERT(imm
.isUInt12());
1090 if (!((rt
| rn
) & 8) && imm
.isUInt6())
1091 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1
, imm
.getUInt6() >> 2, rn
, rt
);
1093 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1096 // If index is set, this is a regular offset or a pre-indexed load;
1097 // if index is not set then it is a post-index load.
1099 // If wback is set rn is updated - this is a pre or post index load,
1100 // if wback is not set this is a regular offset memory access.
1102 // (-255 <= offset <= 255)
1104 // _tmp = _reg + offset
1105 // MEM[index ? _tmp : _reg] = REG[rt]
1106 // if (wback) REG[rn] = _tmp
1107 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1109 ASSERT(rt
!= ARMRegisters::pc
);
1110 ASSERT(rn
!= ARMRegisters::pc
);
1111 ASSERT(index
|| wback
);
1112 ASSERT(!wback
| (rt
!= rn
));
1119 ASSERT((offset
& ~0xff) == 0);
1121 offset
|= (wback
<< 8);
1122 offset
|= (add
<< 9);
1123 offset
|= (index
<< 10);
1124 offset
|= (1 << 11);
1126 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3
, rn
, rt
, offset
);
1129 ALWAYS_INLINE
void ldrh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1131 ASSERT(!BadReg(rt
)); // Memory hint
1132 ASSERT(rn
!= ARMRegisters::pc
); // LDRH (literal)
1133 ASSERT(!BadReg(rm
));
1136 if (!shift
&& !((rt
| rn
| rm
) & 8))
1137 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1
, rm
, rn
, rt
);
1139 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1142 void ldrb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1144 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1145 ASSERT(imm
.isUInt12());
1147 if (!((rt
| rn
) & 8) && imm
.isUInt5())
1148 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1
, imm
.getUInt5(), rn
, rt
);
1150 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1153 void ldrb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1155 ASSERT(rt
!= ARMRegisters::pc
);
1156 ASSERT(rn
!= ARMRegisters::pc
);
1157 ASSERT(index
|| wback
);
1158 ASSERT(!wback
| (rt
!= rn
));
1166 ASSERT(!(offset
& ~0xff));
1168 offset
|= (wback
<< 8);
1169 offset
|= (add
<< 9);
1170 offset
|= (index
<< 10);
1171 offset
|= (1 << 11);
1173 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3
, rn
, rt
, offset
);
1176 ALWAYS_INLINE
void ldrb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1178 ASSERT(rn
!= ARMRegisters::pc
); // LDR (literal)
1179 ASSERT(!BadReg(rm
));
1182 if (!shift
&& !((rt
| rn
| rm
) & 8))
1183 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1
, rm
, rn
, rt
);
1185 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1188 void ldrsb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1190 ASSERT(rn
!= ARMRegisters::pc
);
1191 ASSERT(!BadReg(rm
));
1194 if (!shift
&& !((rt
| rn
| rm
) & 8))
1195 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1
, rm
, rn
, rt
);
1197 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1200 void ldrsh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1202 ASSERT(rn
!= ARMRegisters::pc
);
1203 ASSERT(!BadReg(rm
));
1206 if (!shift
&& !((rt
| rn
| rm
) & 8))
1207 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1
, rm
, rn
, rt
);
1209 m_formatter
.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1212 void lsl(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1214 ASSERT(!BadReg(rd
));
1215 ASSERT(!BadReg(rm
));
1216 ShiftTypeAndAmount
shift(SRType_LSL
, shiftAmount
);
1217 m_formatter
.twoWordOp16FourFours(OP_LSL_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1220 ALWAYS_INLINE
void lsl(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1222 ASSERT(!BadReg(rd
));
1223 ASSERT(!BadReg(rn
));
1224 ASSERT(!BadReg(rm
));
1225 m_formatter
.twoWordOp12Reg4FourFours(OP_LSL_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1228 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1230 ASSERT(!BadReg(rd
));
1231 ASSERT(!BadReg(rm
));
1232 ShiftTypeAndAmount
shift(SRType_LSR
, shiftAmount
);
1233 m_formatter
.twoWordOp16FourFours(OP_LSR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1236 ALWAYS_INLINE
void lsr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1238 ASSERT(!BadReg(rd
));
1239 ASSERT(!BadReg(rn
));
1240 ASSERT(!BadReg(rm
));
1241 m_formatter
.twoWordOp12Reg4FourFours(OP_LSR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1244 ALWAYS_INLINE
void movT3(RegisterID rd
, ARMThumbImmediate imm
)
1246 ASSERT(imm
.isValid());
1247 ASSERT(!imm
.isEncodedImm());
1248 ASSERT(!BadReg(rd
));
1250 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3
, imm
.m_value
.imm4
, rd
, imm
);
1253 ALWAYS_INLINE
void mov(RegisterID rd
, ARMThumbImmediate imm
)
1255 ASSERT(imm
.isValid());
1256 ASSERT(!BadReg(rd
));
1258 if ((rd
< 8) && imm
.isUInt8())
1259 m_formatter
.oneWordOp5Reg3Imm8(OP_MOV_imm_T1
, rd
, imm
.getUInt8());
1260 else if (imm
.isEncodedImm())
1261 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2
, 0xf, rd
, imm
);
1266 ALWAYS_INLINE
void mov(RegisterID rd
, RegisterID rm
)
1268 m_formatter
.oneWordOp8RegReg143(OP_MOV_reg_T1
, rm
, rd
);
1271 ALWAYS_INLINE
void movt(RegisterID rd
, ARMThumbImmediate imm
)
1273 ASSERT(imm
.isUInt16());
1274 ASSERT(!BadReg(rd
));
1275 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT
, imm
.m_value
.imm4
, rd
, imm
);
1278 ALWAYS_INLINE
void mvn(RegisterID rd
, ARMThumbImmediate imm
)
1280 ASSERT(imm
.isEncodedImm());
1281 ASSERT(!BadReg(rd
));
1283 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm
, 0xf, rd
, imm
);
1286 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
, ShiftTypeAndAmount shift
)
1288 ASSERT(!BadReg(rd
));
1289 ASSERT(!BadReg(rm
));
1290 m_formatter
.twoWordOp16FourFours(OP_MVN_reg_T2
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1293 ALWAYS_INLINE
void mvn(RegisterID rd
, RegisterID rm
)
1295 if (!((rd
| rm
) & 8))
1296 m_formatter
.oneWordOp10Reg3Reg3(OP_MVN_reg_T1
, rm
, rd
);
1298 mvn(rd
, rm
, ShiftTypeAndAmount());
1301 ALWAYS_INLINE
void neg(RegisterID rd
, RegisterID rm
)
1303 ARMThumbImmediate zero
= ARMThumbImmediate::makeUInt12(0);
1307 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1309 ASSERT(!BadReg(rd
));
1310 ASSERT(!BadReg(rn
));
1311 ASSERT(imm
.isEncodedImm());
1312 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1
, rn
, rd
, imm
);
1315 ALWAYS_INLINE
void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1317 ASSERT(!BadReg(rd
));
1318 ASSERT(!BadReg(rn
));
1319 ASSERT(!BadReg(rm
));
1320 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1323 void orr(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1325 if ((rd
== rn
) && !((rd
| rm
) & 8))
1326 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1327 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1328 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1330 orr(rd
, rn
, rm
, ShiftTypeAndAmount());
1333 ALWAYS_INLINE
void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1335 ASSERT(!BadReg(rd
));
1336 ASSERT(!BadReg(rn
));
1337 ASSERT(!BadReg(rm
));
1338 m_formatter
.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1341 void orr_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1343 if ((rd
== rn
) && !((rd
| rm
) & 8))
1344 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rm
, rd
);
1345 else if ((rd
== rm
) && !((rd
| rn
) & 8))
1346 m_formatter
.oneWordOp10Reg3Reg3(OP_ORR_reg_T1
, rn
, rd
);
1348 orr_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1351 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rm
, int32_t shiftAmount
)
1353 ASSERT(!BadReg(rd
));
1354 ASSERT(!BadReg(rm
));
1355 ShiftTypeAndAmount
shift(SRType_ROR
, shiftAmount
);
1356 m_formatter
.twoWordOp16FourFours(OP_ROR_imm_T1
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1359 ALWAYS_INLINE
void ror(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1361 ASSERT(!BadReg(rd
));
1362 ASSERT(!BadReg(rn
));
1363 ASSERT(!BadReg(rm
));
1364 m_formatter
.twoWordOp12Reg4FourFours(OP_ROR_reg_T2
, rn
, FourFours(0xf, rd
, 0, rm
));
1367 ALWAYS_INLINE
void smull(RegisterID rdLo
, RegisterID rdHi
, RegisterID rn
, RegisterID rm
)
1369 ASSERT(!BadReg(rdLo
));
1370 ASSERT(!BadReg(rdHi
));
1371 ASSERT(!BadReg(rn
));
1372 ASSERT(!BadReg(rm
));
1373 ASSERT(rdLo
!= rdHi
);
1374 m_formatter
.twoWordOp12Reg4FourFours(OP_SMULL_T1
, rn
, FourFours(rdLo
, rdHi
, 0, rm
));
1377 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1378 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1380 ASSERT(rt
!= ARMRegisters::pc
);
1381 ASSERT(rn
!= ARMRegisters::pc
);
1382 ASSERT(imm
.isUInt12());
1384 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1385 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1386 else if ((rn
== ARMRegisters::sp
) && !(rt
& 8) && imm
.isUInt10())
1387 m_formatter
.oneWordOp5Reg3Imm8(OP_STR_imm_T2
, rt
, static_cast<uint8_t>(imm
.getUInt10() >> 2));
1389 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3
, rn
, rt
, imm
.getUInt12());
1392 // If index is set, this is a regular offset or a pre-indexed store;
1393 // if index is not set then is is a post-index store.
1395 // If wback is set rn is updated - this is a pre or post index store,
1396 // if wback is not set this is a regular offset memory access.
1398 // (-255 <= offset <= 255)
1400 // _tmp = _reg + offset
1401 // MEM[index ? _tmp : _reg] = REG[rt]
1402 // if (wback) REG[rn] = _tmp
1403 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1405 ASSERT(rt
!= ARMRegisters::pc
);
1406 ASSERT(rn
!= ARMRegisters::pc
);
1407 ASSERT(index
|| wback
);
1408 ASSERT(!wback
| (rt
!= rn
));
1415 ASSERT((offset
& ~0xff) == 0);
1417 offset
|= (wback
<< 8);
1418 offset
|= (add
<< 9);
1419 offset
|= (index
<< 10);
1420 offset
|= (1 << 11);
1422 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4
, rn
, rt
, offset
);
1425 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1426 ALWAYS_INLINE
void str(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1428 ASSERT(rn
!= ARMRegisters::pc
);
1429 ASSERT(!BadReg(rm
));
1432 if (!shift
&& !((rt
| rn
| rm
) & 8))
1433 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1
, rm
, rn
, rt
);
1435 m_formatter
.twoWordOp12Reg4FourFours(OP_STR_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1438 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1439 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1441 ASSERT(rt
!= ARMRegisters::pc
);
1442 ASSERT(rn
!= ARMRegisters::pc
);
1443 ASSERT(imm
.isUInt12());
1445 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1446 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1448 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2
, rn
, rt
, imm
.getUInt12());
1451 // If index is set, this is a regular offset or a pre-indexed store;
1452 // if index is not set then is is a post-index store.
1454 // If wback is set rn is updated - this is a pre or post index store,
1455 // if wback is not set this is a regular offset memory access.
1457 // (-255 <= offset <= 255)
1459 // _tmp = _reg + offset
1460 // MEM[index ? _tmp : _reg] = REG[rt]
1461 // if (wback) REG[rn] = _tmp
1462 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1464 ASSERT(rt
!= ARMRegisters::pc
);
1465 ASSERT(rn
!= ARMRegisters::pc
);
1466 ASSERT(index
|| wback
);
1467 ASSERT(!wback
| (rt
!= rn
));
1474 ASSERT((offset
& ~0xff) == 0);
1476 offset
|= (wback
<< 8);
1477 offset
|= (add
<< 9);
1478 offset
|= (index
<< 10);
1479 offset
|= (1 << 11);
1481 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3
, rn
, rt
, offset
);
1484 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1485 ALWAYS_INLINE
void strb(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1487 ASSERT(rn
!= ARMRegisters::pc
);
1488 ASSERT(!BadReg(rm
));
1491 if (!shift
&& !((rt
| rn
| rm
) & 8))
1492 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1
, rm
, rn
, rt
);
1494 m_formatter
.twoWordOp12Reg4FourFours(OP_STRB_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1497 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1498 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, ARMThumbImmediate imm
)
1500 ASSERT(rt
!= ARMRegisters::pc
);
1501 ASSERT(rn
!= ARMRegisters::pc
);
1502 ASSERT(imm
.isUInt12());
1504 if (!((rt
| rn
) & 8) && imm
.isUInt7())
1505 m_formatter
.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1
, imm
.getUInt7() >> 2, rn
, rt
);
1507 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2
, rn
, rt
, imm
.getUInt12());
1510 // If index is set, this is a regular offset or a pre-indexed store;
1511 // if index is not set then is is a post-index store.
1513 // If wback is set rn is updated - this is a pre or post index store,
1514 // if wback is not set this is a regular offset memory access.
1516 // (-255 <= offset <= 255)
1518 // _tmp = _reg + offset
1519 // MEM[index ? _tmp : _reg] = REG[rt]
1520 // if (wback) REG[rn] = _tmp
1521 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, int offset
, bool index
, bool wback
)
1523 ASSERT(rt
!= ARMRegisters::pc
);
1524 ASSERT(rn
!= ARMRegisters::pc
);
1525 ASSERT(index
|| wback
);
1526 ASSERT(!wback
| (rt
!= rn
));
1533 ASSERT(!(offset
& ~0xff));
1535 offset
|= (wback
<< 8);
1536 offset
|= (add
<< 9);
1537 offset
|= (index
<< 10);
1538 offset
|= (1 << 11);
1540 m_formatter
.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3
, rn
, rt
, offset
);
1543 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1544 ALWAYS_INLINE
void strh(RegisterID rt
, RegisterID rn
, RegisterID rm
, unsigned shift
= 0)
1546 ASSERT(rn
!= ARMRegisters::pc
);
1547 ASSERT(!BadReg(rm
));
1550 if (!shift
&& !((rt
| rn
| rm
) & 8))
1551 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1
, rm
, rn
, rt
);
1553 m_formatter
.twoWordOp12Reg4FourFours(OP_STRH_reg_T2
, rn
, FourFours(rt
, 0, shift
, rm
));
1556 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1558 // Rd can only be SP if Rn is also SP.
1559 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1560 ASSERT(rd
!= ARMRegisters::pc
);
1561 ASSERT(rn
!= ARMRegisters::pc
);
1562 ASSERT(imm
.isValid());
1564 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1565 ASSERT(!(imm
.getUInt16() & 3));
1566 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1568 } else if (!((rd
| rn
) & 8)) {
1569 if (imm
.isUInt3()) {
1570 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1572 } else if ((rd
== rn
) && imm
.isUInt8()) {
1573 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1578 if (imm
.isEncodedImm())
1579 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3
, rn
, rd
, imm
);
1581 ASSERT(imm
.isUInt12());
1582 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4
, rn
, rd
, imm
);
1586 ALWAYS_INLINE
void sub(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1588 ASSERT(rd
!= ARMRegisters::pc
);
1589 ASSERT(rn
!= ARMRegisters::pc
);
1590 ASSERT(imm
.isValid());
1591 ASSERT(imm
.isUInt12());
1593 if (!((rd
| rn
) & 8) && !imm
.getUInt12())
1594 m_formatter
.oneWordOp10Reg3Reg3(OP_RSB_imm_T1
, rn
, rd
);
1596 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2
, rn
, rd
, imm
);
1599 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1601 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1602 ASSERT(rd
!= ARMRegisters::pc
);
1603 ASSERT(rn
!= ARMRegisters::pc
);
1604 ASSERT(!BadReg(rm
));
1605 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1608 // NOTE: In an IT block, add doesn't modify the flags register.
1609 ALWAYS_INLINE
void sub(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1611 if (!((rd
| rn
| rm
) & 8))
1612 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1614 sub(rd
, rn
, rm
, ShiftTypeAndAmount());
1617 // Not allowed in an IT (if then) block.
1618 void sub_S(RegisterID rd
, RegisterID rn
, ARMThumbImmediate imm
)
1620 // Rd can only be SP if Rn is also SP.
1621 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1622 ASSERT(rd
!= ARMRegisters::pc
);
1623 ASSERT(rn
!= ARMRegisters::pc
);
1624 ASSERT(imm
.isValid());
1626 if ((rn
== ARMRegisters::sp
) && (rd
== ARMRegisters::sp
) && imm
.isUInt9()) {
1627 ASSERT(!(imm
.getUInt16() & 3));
1628 m_formatter
.oneWordOp9Imm7(OP_SUB_SP_imm_T1
, static_cast<uint8_t>(imm
.getUInt9() >> 2));
1630 } else if (!((rd
| rn
) & 8)) {
1631 if (imm
.isUInt3()) {
1632 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1
, (RegisterID
)imm
.getUInt3(), rn
, rd
);
1634 } else if ((rd
== rn
) && imm
.isUInt8()) {
1635 m_formatter
.oneWordOp5Reg3Imm8(OP_SUB_imm_T2
, rd
, imm
.getUInt8());
1640 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3
, rn
, rd
, imm
);
1643 ALWAYS_INLINE
void sub_S(RegisterID rd
, ARMThumbImmediate imm
, RegisterID rn
)
1645 ASSERT(rd
!= ARMRegisters::pc
);
1646 ASSERT(rn
!= ARMRegisters::pc
);
1647 ASSERT(imm
.isValid());
1648 ASSERT(imm
.isUInt12());
1650 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2
, rn
, rd
, imm
);
1653 // Not allowed in an IT (if then) block?
1654 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1656 ASSERT((rd
!= ARMRegisters::sp
) || (rn
== ARMRegisters::sp
));
1657 ASSERT(rd
!= ARMRegisters::pc
);
1658 ASSERT(rn
!= ARMRegisters::pc
);
1659 ASSERT(!BadReg(rm
));
1660 m_formatter
.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2
, rn
, FourFours(shift
.hi4(), rd
, shift
.lo4(), rm
));
1663 // Not allowed in an IT (if then) block.
1664 ALWAYS_INLINE
void sub_S(RegisterID rd
, RegisterID rn
, RegisterID rm
)
1666 if (!((rd
| rn
| rm
) & 8))
1667 m_formatter
.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1
, rm
, rn
, rd
);
1669 sub_S(rd
, rn
, rm
, ShiftTypeAndAmount());
1672 ALWAYS_INLINE
void tst(RegisterID rn
, ARMThumbImmediate imm
)
1674 ASSERT(!BadReg(rn
));
1675 ASSERT(imm
.isEncodedImm());
1677 m_formatter
.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm
, rn
, (RegisterID
)0xf, imm
);
1680 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
, ShiftTypeAndAmount shift
)
1682 ASSERT(!BadReg(rn
));
1683 ASSERT(!BadReg(rm
));
1684 m_formatter
.twoWordOp12Reg4FourFours(OP_TST_reg_T2
, rn
, FourFours(shift
.hi4(), 0xf, shift
.lo4(), rm
));
1687 ALWAYS_INLINE
void tst(RegisterID rn
, RegisterID rm
)
1690 tst(rn
, rm
, ShiftTypeAndAmount());
1692 m_formatter
.oneWordOp10Reg3Reg3(OP_TST_reg_T1
, rm
, rn
);
1695 ALWAYS_INLINE
void ubfx(RegisterID rd
, RegisterID rn
, unsigned lsb
, unsigned width
)
1698 ASSERT((width
>= 1) && (width
<= 32));
1699 ASSERT((lsb
+ width
) <= 32);
1700 m_formatter
.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1
, rd
, rn
, (lsb
& 0x1c) << 10, (lsb
& 0x3) << 6, (width
- 1) & 0x1f);
1703 void vadd(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1705 m_formatter
.vfpOp(OP_VADD_T2
, OP_VADD_T2b
, true, rn
, rd
, rm
);
1708 void vcmp(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1710 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(4), rd
, rm
);
1713 void vcmpz(FPDoubleRegisterID rd
)
1715 m_formatter
.vfpOp(OP_VCMP
, OP_VCMPb
, true, VFPOperand(5), rd
, VFPOperand(0));
1718 void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
1720 // boolean values are 64bit (toInt, unsigned, roundZero)
1721 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(false, false, false), rd
, rm
);
1724 void vcvt_floatingPointToSigned(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1726 // boolean values are 64bit (toInt, unsigned, roundZero)
1727 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, false, true), rd
, rm
);
1730 void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1732 // boolean values are 64bit (toInt, unsigned, roundZero)
1733 m_formatter
.vfpOp(OP_VCVT_FPIVFP
, OP_VCVT_FPIVFPb
, true, vcvtOp(true, true, true), rd
, rm
);
1736 void vdiv(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1738 m_formatter
.vfpOp(OP_VDIV
, OP_VDIVb
, true, rn
, rd
, rm
);
1741 void vldr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1743 m_formatter
.vfpMemOp(OP_VLDR
, OP_VLDRb
, true, rn
, rd
, imm
);
1746 void flds(FPSingleRegisterID rd
, RegisterID rn
, int32_t imm
)
1748 m_formatter
.vfpMemOp(OP_FLDS
, OP_FLDSb
, false, rn
, rd
, imm
);
1751 void vmov(RegisterID rd
, FPSingleRegisterID rn
)
1753 ASSERT(!BadReg(rd
));
1754 m_formatter
.vfpOp(OP_VMOV_StoC
, OP_VMOV_StoCb
, false, rn
, rd
, VFPOperand(0));
1757 void vmov(FPSingleRegisterID rd
, RegisterID rn
)
1759 ASSERT(!BadReg(rn
));
1760 m_formatter
.vfpOp(OP_VMOV_CtoS
, OP_VMOV_CtoSb
, false, rd
, rn
, VFPOperand(0));
1763 void vmov(RegisterID rd1
, RegisterID rd2
, FPDoubleRegisterID rn
)
1765 ASSERT(!BadReg(rd1
));
1766 ASSERT(!BadReg(rd2
));
1767 m_formatter
.vfpOp(OP_VMOV_DtoC
, OP_VMOV_DtoCb
, true, rd2
, VFPOperand(rd1
| 16), rn
);
1770 void vmov(FPDoubleRegisterID rd
, RegisterID rn1
, RegisterID rn2
)
1772 ASSERT(!BadReg(rn1
));
1773 ASSERT(!BadReg(rn2
));
1774 m_formatter
.vfpOp(OP_VMOV_CtoD
, OP_VMOV_CtoDb
, true, rn2
, VFPOperand(rn1
| 16), rd
);
1777 void vmov(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
)
1779 m_formatter
.vfpOp(OP_VMOV_T2
, OP_VMOV_T2b
, true, VFPOperand(0), rd
, rn
);
1782 void vmrs(RegisterID reg
= ARMRegisters::pc
)
1784 ASSERT(reg
!= ARMRegisters::sp
);
1785 m_formatter
.vfpOp(OP_VMRS
, OP_VMRSb
, false, VFPOperand(1), VFPOperand(0x10 | reg
), VFPOperand(0));
1788 void vmul(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1790 m_formatter
.vfpOp(OP_VMUL_T2
, OP_VMUL_T2b
, true, rn
, rd
, rm
);
1793 void vstr(FPDoubleRegisterID rd
, RegisterID rn
, int32_t imm
)
1795 m_formatter
.vfpMemOp(OP_VSTR
, OP_VSTRb
, true, rn
, rd
, imm
);
1798 void fsts(FPSingleRegisterID rd
, RegisterID rn
, int32_t imm
)
1800 m_formatter
.vfpMemOp(OP_FSTS
, OP_FSTSb
, false, rn
, rd
, imm
);
1803 void vsub(FPDoubleRegisterID rd
, FPDoubleRegisterID rn
, FPDoubleRegisterID rm
)
1805 m_formatter
.vfpOp(OP_VSUB_T2
, OP_VSUB_T2b
, true, rn
, rd
, rm
);
1808 void vabs(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1810 m_formatter
.vfpOp(OP_VABS_T2
, OP_VABS_T2b
, true, VFPOperand(16), rd
, rm
);
1813 void vneg(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1815 m_formatter
.vfpOp(OP_VNEG_T2
, OP_VNEG_T2b
, true, VFPOperand(1), rd
, rm
);
1818 void vsqrt(FPDoubleRegisterID rd
, FPDoubleRegisterID rm
)
1820 m_formatter
.vfpOp(OP_VSQRT_T1
, OP_VSQRT_T1b
, true, VFPOperand(17), rd
, rm
);
1823 void vcvtds(FPDoubleRegisterID rd
, FPSingleRegisterID rm
)
1825 m_formatter
.vfpOp(OP_VCVTDS_T1
, OP_VCVTDS_T1b
, false, VFPOperand(23), rd
, rm
);
1828 void vcvtsd(FPSingleRegisterID rd
, FPDoubleRegisterID rm
)
1830 m_formatter
.vfpOp(OP_VCVTSD_T1
, OP_VCVTSD_T1b
, true, VFPOperand(23), rd
, rm
);
1835 m_formatter
.oneWordOp8Imm8(OP_NOP_T1
, 0);
1838 AssemblerLabel
label()
1840 return m_formatter
.label();
1843 AssemblerLabel
align(int alignment
)
1845 while (!m_formatter
.isAligned(alignment
))
1851 static void* getRelocatedAddress(void* code
, AssemblerLabel label
)
1853 ASSERT(label
.isSet());
1854 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + label
.m_offset
);
1857 static int getDifferenceBetweenLabels(AssemblerLabel a
, AssemblerLabel b
)
1859 return b
.m_offset
- a
.m_offset
;
1862 int executableOffsetFor(int location
)
1866 return static_cast<int32_t*>(m_formatter
.data())[location
/ sizeof(int32_t) - 1];
1869 int jumpSizeDelta(JumpType jumpType
, JumpLinkType jumpLinkType
) { return JUMP_ENUM_SIZE(jumpType
) - JUMP_ENUM_SIZE(jumpLinkType
); }
1871 // Assembler admin methods:
1873 static ALWAYS_INLINE
bool linkRecordSourceComparator(const LinkRecord
& a
, const LinkRecord
& b
)
1875 return a
.from() < b
.from();
1878 bool canCompact(JumpType jumpType
)
1880 // The following cannot be compacted:
1881 // JumpFixed: represents custom jump sequence
1882 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
1883 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
1884 return (jumpType
== JumpNoCondition
) || (jumpType
== JumpCondition
);
1887 JumpLinkType
computeJumpType(JumpType jumpType
, const uint8_t* from
, const uint8_t* to
)
1889 if (jumpType
== JumpFixed
)
1892 // for patchable jump we must leave space for the longest code sequence
1893 if (jumpType
== JumpNoConditionFixedSize
)
1895 if (jumpType
== JumpConditionFixedSize
)
1896 return LinkConditionalBX
;
1898 const int paddingSize
= JUMP_ENUM_SIZE(jumpType
);
1899 bool mayTriggerErrata
= false;
1901 if (jumpType
== JumpCondition
) {
1902 // 2-byte conditional T1
1903 const uint16_t* jumpT1Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT1
)));
1904 if (canBeJumpT1(jumpT1Location
, to
))
1906 // 4-byte conditional T3
1907 const uint16_t* jumpT3Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT3
)));
1908 if (canBeJumpT3(jumpT3Location
, to
, mayTriggerErrata
)) {
1909 if (!mayTriggerErrata
)
1912 // 4-byte conditional T4 with IT
1913 const uint16_t* conditionalJumpT4Location
=
1914 reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkConditionalJumpT4
)));
1915 if (canBeJumpT4(conditionalJumpT4Location
, to
, mayTriggerErrata
)) {
1916 if (!mayTriggerErrata
)
1917 return LinkConditionalJumpT4
;
1920 // 2-byte unconditional T2
1921 const uint16_t* jumpT2Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT2
)));
1922 if (canBeJumpT2(jumpT2Location
, to
))
1924 // 4-byte unconditional T4
1925 const uint16_t* jumpT4Location
= reinterpret_cast_ptr
<const uint16_t*>(from
- (paddingSize
- JUMP_ENUM_SIZE(LinkJumpT4
)));
1926 if (canBeJumpT4(jumpT4Location
, to
, mayTriggerErrata
)) {
1927 if (!mayTriggerErrata
)
1930 // use long jump sequence
1934 ASSERT(jumpType
== JumpCondition
);
1935 return LinkConditionalBX
;
1938 JumpLinkType
computeJumpType(LinkRecord
& record
, const uint8_t* from
, const uint8_t* to
)
1940 JumpLinkType linkType
= computeJumpType(record
.type(), from
, to
);
1941 record
.setLinkType(linkType
);
1945 void recordLinkOffsets(int32_t regionStart
, int32_t regionEnd
, int32_t offset
)
1947 int32_t ptr
= regionStart
/ sizeof(int32_t);
1948 const int32_t end
= regionEnd
/ sizeof(int32_t);
1949 int32_t* offsets
= static_cast<int32_t*>(m_formatter
.data());
1951 offsets
[ptr
++] = offset
;
1954 Vector
<LinkRecord
>& jumpsToLink()
1956 std::sort(m_jumpsToLink
.begin(), m_jumpsToLink
.end(), linkRecordSourceComparator
);
1957 return m_jumpsToLink
;
1960 void ALWAYS_INLINE
link(LinkRecord
& record
, uint8_t* from
, uint8_t* to
)
1962 switch (record
.linkType()) {
1964 linkJumpT1(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
1967 linkJumpT2(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
1970 linkJumpT3(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
1973 linkJumpT4(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
1975 case LinkConditionalJumpT4
:
1976 linkConditionalJumpT4(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
1978 case LinkConditionalBX
:
1979 linkConditionalBX(record
.condition(), reinterpret_cast_ptr
<uint16_t*>(from
), to
);
1982 linkBX(reinterpret_cast_ptr
<uint16_t*>(from
), to
);
1985 ASSERT_NOT_REACHED();
1990 void* unlinkedCode() { return m_formatter
.data(); }
1991 size_t codeSize() const { return m_formatter
.codeSize(); }
1993 static unsigned getCallReturnOffset(AssemblerLabel call
)
1995 ASSERT(call
.isSet());
1996 return call
.m_offset
;
1999 // Linking & patching:
2001 // 'link' and 'patch' methods are for use on unprotected code - such as the code
2002 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
2003 // code has been finalized it is (platform support permitting) within a non-
2004 // writable region of memory; to modify the code in an execute-only execuable
2005 // pool the 'repatch' and 'relink' methods should be used.
2007 void linkJump(AssemblerLabel from
, AssemblerLabel to
, JumpType type
, Condition condition
)
2010 ASSERT(from
.isSet());
2011 m_jumpsToLink
.append(LinkRecord(from
.m_offset
, to
.m_offset
, type
, condition
));
2014 static void linkJump(void* code
, AssemblerLabel from
, void* to
)
2016 ASSERT(from
.isSet());
2018 uint16_t* location
= reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
);
2019 linkJumpAbsolute(location
, to
);
2022 static void linkCall(void* code
, AssemblerLabel from
, void* to
)
2024 ASSERT(!(reinterpret_cast<intptr_t>(code
) & 1));
2025 ASSERT(from
.isSet());
2026 ASSERT(reinterpret_cast<intptr_t>(to
) & 1);
2028 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code
) + from
.m_offset
) - 1, to
);
2031 static void linkPointer(void* code
, AssemblerLabel where
, void* value
)
2033 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
);
2036 static void relinkJump(void* from
, void* to
)
2038 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
2039 ASSERT(!(reinterpret_cast<intptr_t>(to
) & 1));
2041 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from
), to
);
2043 cacheFlush(reinterpret_cast<uint16_t*>(from
) - 5, 5 * sizeof(uint16_t));
2046 static void relinkCall(void* from
, void* to
)
2048 ASSERT(!(reinterpret_cast<intptr_t>(from
) & 1));
2049 ASSERT(reinterpret_cast<intptr_t>(to
) & 1);
2051 setPointer(reinterpret_cast<uint16_t*>(from
) - 1, to
);
2054 static void* readCallTarget(void* from
)
2056 return readPointer(reinterpret_cast<uint16_t*>(from
) - 1);
2059 static void repatchInt32(void* where
, int32_t value
)
2061 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
2063 setInt32(where
, value
);
2066 static void repatchCompact(void* where
, int32_t value
)
2069 ASSERT(ARMThumbImmediate::makeUInt12(value
).isUInt7());
2070 setUInt7ForLoad(where
, ARMThumbImmediate::makeUInt12(value
));
2073 static void repatchPointer(void* where
, void* value
)
2075 ASSERT(!(reinterpret_cast<intptr_t>(where
) & 1));
2077 setPointer(where
, value
);
2080 static void* readPointer(void* where
)
2082 return reinterpret_cast<void*>(readInt32(where
));
2085 unsigned debugOffset() { return m_formatter
.debugOffset(); }
2087 static void cacheFlush(void* code
, size_t size
)
2090 sys_cache_control(kCacheFunctionPrepareForExecution
, code
, size
);
2102 : "r" (code
), "r" (reinterpret_cast<char*>(code
) + size
)
2103 : "r0", "r1", "r2");
2105 CacheRangeFlush(code
, size
, CACHE_SYNC_ALL
);
2107 #if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
2108 msync(code
, size
, MS_INVALIDATE_ICACHE
);
2114 #error "The cacheFlush support is missing on this platform."
2119 // VFP operations commonly take one or more 5-bit operands, typically representing a
2120 // floating point register number. This will commonly be encoded in the instruction
2121 // in two parts, with one single bit field, and one 4-bit field. In the case of
2122 // double precision operands the high bit of the register number will be encoded
2123 // separately, and for single precision operands the high bit of the register number
2124 // will be encoded individually.
2125 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2126 // field to be encoded together in the instruction (the low 4-bits of a double
2127 // register number, or the high 4-bits of a single register number), and bit 4
2128 // contains the bit value to be encoded individually.
2130 explicit VFPOperand(uint32_t value
)
2133 ASSERT(!(m_value
& ~0x1f));
2136 VFPOperand(FPDoubleRegisterID reg
)
2141 VFPOperand(RegisterID reg
)
2146 VFPOperand(FPSingleRegisterID reg
)
2147 : m_value(((reg
& 1) << 4) | (reg
>> 1)) // rotate the lowest bit of 'reg' to the top.
2153 return m_value
>> 4;
2158 return m_value
& 0xf;
2164 VFPOperand
vcvtOp(bool toInteger
, bool isUnsigned
, bool isRoundZero
)
2166 // Cannot specify rounding when converting to float.
2167 ASSERT(toInteger
|| !isRoundZero
);
2171 // opc2 indicates both toInteger & isUnsigned.
2172 op
|= isUnsigned
? 0x4 : 0x5;
2173 // 'op' field in instruction is isRoundZero
2177 ASSERT(!isRoundZero
);
2178 // 'op' field in instruction is isUnsigned
2182 return VFPOperand(op
);
2185 static void setInt32(void* code
, uint32_t value
)
2187 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2188 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
2190 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
));
2191 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value
>> 16));
2192 location
[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2193 location
[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-3] >> 8) & 0xf, lo16
);
2194 location
[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2195 location
[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location
[-1] >> 8) & 0xf, hi16
);
2197 cacheFlush(location
- 4, 4 * sizeof(uint16_t));
2200 static int32_t readInt32(void* code
)
2202 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2203 ASSERT(isMOV_imm_T3(location
- 4) && isMOVT(location
- 2));
2205 ARMThumbImmediate lo16
;
2206 ARMThumbImmediate hi16
;
2207 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16
, location
[-4]);
2208 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16
, location
[-3]);
2209 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16
, location
[-2]);
2210 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16
, location
[-1]);
2211 uint32_t result
= hi16
.asUInt16();
2213 result
|= lo16
.asUInt16();
2214 return static_cast<int32_t>(result
);
2217 static void setUInt7ForLoad(void* code
, ARMThumbImmediate imm
)
2219 // Requires us to have planted a LDR_imm_T1
2220 ASSERT(imm
.isValid());
2221 ASSERT(imm
.isUInt7());
2222 uint16_t* location
= reinterpret_cast<uint16_t*>(code
);
2223 location
[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2224 location
[0] |= (imm
.getUInt7() >> 2) << 6;
2225 cacheFlush(location
, sizeof(uint16_t));
2228 static void setPointer(void* code
, void* value
)
2230 setInt32(code
, reinterpret_cast<uint32_t>(value
));
2233 static bool isB(void* address
)
2235 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2236 return ((instruction
[0] & 0xf800) == OP_B_T4a
) && ((instruction
[1] & 0xd000) == OP_B_T4b
);
2239 static bool isBX(void* address
)
2241 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2242 return (instruction
[0] & 0xff87) == OP_BX
;
2245 static bool isMOV_imm_T3(void* address
)
2247 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2248 return ((instruction
[0] & 0xFBF0) == OP_MOV_imm_T3
) && ((instruction
[1] & 0x8000) == 0);
2251 static bool isMOVT(void* address
)
2253 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2254 return ((instruction
[0] & 0xFBF0) == OP_MOVT
) && ((instruction
[1] & 0x8000) == 0);
2257 static bool isNOP_T1(void* address
)
2259 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2260 return instruction
[0] == OP_NOP_T1
;
2263 static bool isNOP_T2(void* address
)
2265 uint16_t* instruction
= static_cast<uint16_t*>(address
);
2266 return (instruction
[0] == OP_NOP_T2a
) && (instruction
[1] == OP_NOP_T2b
);
2269 static bool canBeJumpT1(const uint16_t* instruction
, const void* target
)
2271 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2272 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2274 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2275 // It does not appear to be documented in the ARM ARM (big surprise), but
2276 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2277 // less than the actual displacement.
2279 return ((relative
<< 23) >> 23) == relative
;
2282 static bool canBeJumpT2(const uint16_t* instruction
, const void* target
)
2284 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2285 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2287 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2288 // It does not appear to be documented in the ARM ARM (big surprise), but
2289 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2290 // less than the actual displacement.
2292 return ((relative
<< 20) >> 20) == relative
;
2295 static bool canBeJumpT3(const uint16_t* instruction
, const void* target
, bool& mayTriggerErrata
)
2297 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2298 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2300 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2301 // From Cortex-A8 errata:
2302 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
2303 // the target of the branch falls within the first region it is
2304 // possible for the processor to incorrectly determine the branch
2305 // instruction, and it is also possible in some cases for the processor
2306 // to enter a deadlock state.
2307 // The instruction is spanning two pages if it ends at an address ending 0x002
2308 bool spansTwo4K
= ((reinterpret_cast<intptr_t>(instruction
) & 0xfff) == 0x002);
2309 mayTriggerErrata
= spansTwo4K
;
2310 // The target is in the first page if the jump branch back by [3..0x1002] bytes
2311 bool targetInFirstPage
= (relative
>= -0x1002) && (relative
< -2);
2312 bool wouldTriggerA8Errata
= spansTwo4K
&& targetInFirstPage
;
2313 return ((relative
<< 11) >> 11) == relative
&& !wouldTriggerA8Errata
;
2316 static bool canBeJumpT4(const uint16_t* instruction
, const void* target
, bool& mayTriggerErrata
)
2318 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2319 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2321 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2322 // From Cortex-A8 errata:
2323 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
2324 // the target of the branch falls within the first region it is
2325 // possible for the processor to incorrectly determine the branch
2326 // instruction, and it is also possible in some cases for the processor
2327 // to enter a deadlock state.
2328 // The instruction is spanning two pages if it ends at an address ending 0x002
2329 bool spansTwo4K
= ((reinterpret_cast<intptr_t>(instruction
) & 0xfff) == 0x002);
2330 mayTriggerErrata
= spansTwo4K
;
2331 // The target is in the first page if the jump branch back by [3..0x1002] bytes
2332 bool targetInFirstPage
= (relative
>= -0x1002) && (relative
< -2);
2333 bool wouldTriggerA8Errata
= spansTwo4K
&& targetInFirstPage
;
2334 return ((relative
<< 7) >> 7) == relative
&& !wouldTriggerA8Errata
;
2337 void linkJumpT1(Condition cond
, uint16_t* instruction
, void* target
)
2339 // FIMXE: this should be up in the MacroAssembler layer. :-(
2340 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2341 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2342 ASSERT(canBeJumpT1(instruction
, target
));
2344 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2345 // It does not appear to be documented in the ARM ARM (big surprise), but
2346 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2347 // less than the actual displacement.
2350 // All branch offsets should be an even distance.
2351 ASSERT(!(relative
& 1));
2352 instruction
[-1] = OP_B_T1
| ((cond
& 0xf) << 8) | ((relative
& 0x1fe) >> 1);
2355 static void linkJumpT2(uint16_t* instruction
, void* target
)
2357 // FIMXE: this should be up in the MacroAssembler layer. :-(
2358 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2359 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2360 ASSERT(canBeJumpT2(instruction
, target
));
2362 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2363 // It does not appear to be documented in the ARM ARM (big surprise), but
2364 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2365 // less than the actual displacement.
2368 // All branch offsets should be an even distance.
2369 ASSERT(!(relative
& 1));
2370 instruction
[-1] = OP_B_T2
| ((relative
& 0xffe) >> 1);
2373 void linkJumpT3(Condition cond
, uint16_t* instruction
, void* target
)
2375 // FIMXE: this should be up in the MacroAssembler layer. :-(
2376 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2377 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2379 UNUSED_PARAM(scratch
);
2380 ASSERT(canBeJumpT3(instruction
, target
, scratch
));
2382 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2384 // All branch offsets should be an even distance.
2385 ASSERT(!(relative
& 1));
2386 instruction
[-2] = OP_B_T3a
| ((relative
& 0x100000) >> 10) | ((cond
& 0xf) << 6) | ((relative
& 0x3f000) >> 12);
2387 instruction
[-1] = OP_B_T3b
| ((relative
& 0x80000) >> 8) | ((relative
& 0x40000) >> 5) | ((relative
& 0xffe) >> 1);
2390 static void linkJumpT4(uint16_t* instruction
, void* target
)
2392 // FIMXE: this should be up in the MacroAssembler layer. :-(
2393 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2394 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2396 UNUSED_PARAM(scratch
);
2397 ASSERT(canBeJumpT4(instruction
, target
, scratch
));
2399 intptr_t relative
= reinterpret_cast<intptr_t>(target
) - (reinterpret_cast<intptr_t>(instruction
));
2400 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2402 relative
^= 0xC00000;
2404 // All branch offsets should be an even distance.
2405 ASSERT(!(relative
& 1));
2406 instruction
[-2] = OP_B_T4a
| ((relative
& 0x1000000) >> 14) | ((relative
& 0x3ff000) >> 12);
2407 instruction
[-1] = OP_B_T4b
| ((relative
& 0x800000) >> 10) | ((relative
& 0x400000) >> 11) | ((relative
& 0xffe) >> 1);
2410 void linkConditionalJumpT4(Condition cond
, uint16_t* instruction
, void* target
)
2412 // FIMXE: this should be up in the MacroAssembler layer. :-(
2413 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2414 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2416 instruction
[-3] = ifThenElse(cond
) | OP_IT
;
2417 linkJumpT4(instruction
, target
);
2420 static void linkBX(uint16_t* instruction
, void* target
)
2422 // FIMXE: this should be up in the MacroAssembler layer. :-(
2423 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2424 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2426 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2427 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2428 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2429 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2430 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2431 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2432 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2433 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2436 void linkConditionalBX(Condition cond
, uint16_t* instruction
, void* target
)
2438 // FIMXE: this should be up in the MacroAssembler layer. :-(
2439 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2440 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2442 linkBX(instruction
, target
);
2443 instruction
[-6] = ifThenElse(cond
, true, true) | OP_IT
;
2446 static void linkJumpAbsolute(uint16_t* instruction
, void* target
)
2448 // FIMXE: this should be up in the MacroAssembler layer. :-(
2449 ASSERT(!(reinterpret_cast<intptr_t>(instruction
) & 1));
2450 ASSERT(!(reinterpret_cast<intptr_t>(target
) & 1));
2452 ASSERT((isMOV_imm_T3(instruction
- 5) && isMOVT(instruction
- 3) && isBX(instruction
- 1))
2453 || (isNOP_T1(instruction
- 5) && isNOP_T2(instruction
- 4) && isB(instruction
- 2)));
2456 if (canBeJumpT4(instruction
, target
, scratch
)) {
2457 // There may be a better way to fix this, but right now put the NOPs first, since in the
2458 // case of an conditional branch this will be coming after an ITTT predicating *three*
2459 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2460 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2461 // actually be the second half of a 2-word op.
2462 instruction
[-5] = OP_NOP_T1
;
2463 instruction
[-4] = OP_NOP_T2a
;
2464 instruction
[-3] = OP_NOP_T2b
;
2465 linkJumpT4(instruction
, target
);
2467 const uint16_t JUMP_TEMPORARY_REGISTER
= ARMRegisters::ip
;
2468 ARMThumbImmediate lo16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) + 1));
2469 ARMThumbImmediate hi16
= ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target
) >> 16));
2470 instruction
[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3
, lo16
);
2471 instruction
[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, lo16
);
2472 instruction
[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT
, hi16
);
2473 instruction
[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER
, hi16
);
2474 instruction
[-1] = OP_BX
| (JUMP_TEMPORARY_REGISTER
<< 3);
2478 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op
, ARMThumbImmediate imm
)
2480 return op
| (imm
.m_value
.i
<< 10) | imm
.m_value
.imm4
;
2483 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate
& result
, uint16_t value
)
2485 result
.m_value
.i
= (value
>> 10) & 1;
2486 result
.m_value
.imm4
= value
& 15;
2489 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd
, ARMThumbImmediate imm
)
2491 return (imm
.m_value
.imm3
<< 12) | (rd
<< 8) | imm
.m_value
.imm8
;
2494 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate
& result
, uint16_t value
)
2496 result
.m_value
.imm3
= (value
>> 12) & 7;
2497 result
.m_value
.imm8
= value
& 255;
2500 class ARMInstructionFormatter
{
2502 ALWAYS_INLINE
void oneWordOp5Reg3Imm8(OpcodeID op
, RegisterID rd
, uint8_t imm
)
2504 m_buffer
.putShort(op
| (rd
<< 8) | imm
);
2507 ALWAYS_INLINE
void oneWordOp5Imm5Reg3Reg3(OpcodeID op
, uint8_t imm
, RegisterID reg1
, RegisterID reg2
)
2509 m_buffer
.putShort(op
| (imm
<< 6) | (reg1
<< 3) | reg2
);
2512 ALWAYS_INLINE
void oneWordOp7Reg3Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
, RegisterID reg3
)
2514 m_buffer
.putShort(op
| (reg1
<< 6) | (reg2
<< 3) | reg3
);
2517 ALWAYS_INLINE
void oneWordOp8Imm8(OpcodeID op
, uint8_t imm
)
2519 m_buffer
.putShort(op
| imm
);
2522 ALWAYS_INLINE
void oneWordOp8RegReg143(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2524 m_buffer
.putShort(op
| ((reg2
& 8) << 4) | (reg1
<< 3) | (reg2
& 7));
2527 ALWAYS_INLINE
void oneWordOp9Imm7(OpcodeID op
, uint8_t imm
)
2529 m_buffer
.putShort(op
| imm
);
2532 ALWAYS_INLINE
void oneWordOp10Reg3Reg3(OpcodeID op
, RegisterID reg1
, RegisterID reg2
)
2534 m_buffer
.putShort(op
| (reg1
<< 3) | reg2
);
2537 ALWAYS_INLINE
void twoWordOp12Reg4FourFours(OpcodeID1 op
, RegisterID reg
, FourFours ff
)
2539 m_buffer
.putShort(op
| reg
);
2540 m_buffer
.putShort(ff
.m_u
.value
);
2543 ALWAYS_INLINE
void twoWordOp16FourFours(OpcodeID1 op
, FourFours ff
)
2545 m_buffer
.putShort(op
);
2546 m_buffer
.putShort(ff
.m_u
.value
);
2549 ALWAYS_INLINE
void twoWordOp16Op16(OpcodeID1 op1
, OpcodeID2 op2
)
2551 m_buffer
.putShort(op1
);
2552 m_buffer
.putShort(op2
);
2555 ALWAYS_INLINE
void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op
, int imm4
, RegisterID rd
, ARMThumbImmediate imm
)
2557 ARMThumbImmediate newImm
= imm
;
2558 newImm
.m_value
.imm4
= imm4
;
2560 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op
, newImm
));
2561 m_buffer
.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd
, newImm
));
2564 ALWAYS_INLINE
void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm
)
2566 m_buffer
.putShort(op
| reg1
);
2567 m_buffer
.putShort((reg2
<< 12) | imm
);
2570 ALWAYS_INLINE
void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op
, RegisterID reg1
, RegisterID reg2
, uint16_t imm1
, uint16_t imm2
, uint16_t imm3
)
2572 m_buffer
.putShort(op
| reg1
);
2573 m_buffer
.putShort((imm1
<< 12) | (reg2
<< 8) | (imm2
<< 6) | imm3
);
2576 // Formats up instructions of the pattern:
2577 // 111111111B11aaaa:bbbb222SA2C2cccc
2578 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2579 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2580 ALWAYS_INLINE
void vfpOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, VFPOperand a
, VFPOperand b
, VFPOperand c
)
2582 ASSERT(!(op1
& 0x004f));
2583 ASSERT(!(op2
& 0xf1af));
2584 m_buffer
.putShort(op1
| b
.bits1() << 6 | a
.bits4());
2585 m_buffer
.putShort(op2
| b
.bits4() << 12 | size
<< 8 | a
.bits1() << 7 | c
.bits1() << 5 | c
.bits4());
2588 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2589 // (i.e. +/-(0..255) 32-bit words)
2590 ALWAYS_INLINE
void vfpMemOp(OpcodeID1 op1
, OpcodeID2 op2
, bool size
, RegisterID rn
, VFPOperand rd
, int32_t imm
)
2598 uint32_t offset
= imm
;
2599 ASSERT(!(offset
& ~0x3fc));
2602 m_buffer
.putShort(op1
| (up
<< 7) | rd
.bits1() << 6 | rn
);
2603 m_buffer
.putShort(op2
| rd
.bits4() << 12 | size
<< 8 | offset
);
2606 // Administrative methods:
2608 size_t codeSize() const { return m_buffer
.codeSize(); }
2609 AssemblerLabel
label() const { return m_buffer
.label(); }
2610 bool isAligned(int alignment
) const { return m_buffer
.isAligned(alignment
); }
2611 void* data() const { return m_buffer
.data(); }
2613 unsigned debugOffset() { return m_buffer
.debugOffset(); }
2616 AssemblerBuffer m_buffer
;
2619 Vector
<LinkRecord
> m_jumpsToLink
;
2620 Vector
<int32_t> m_offsets
;
2625 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2627 #endif // ARMAssembler_h