/*
 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ARM64Assembler_h
#define ARM64Assembler_h

#if ENABLE(ASSEMBLER) && CPU(ARM64)

#include "AssemblerBuffer.h"
#include <limits.h>
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
#include <stdint.h>

#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64)
#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
#define DATASIZE DATASIZE_OF(datasize)
#define MEMOPSIZE MEMOPSIZE_OF(datasize)
#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32)
#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32)

namespace JSC {

ALWAYS_INLINE bool isInt7(int32_t value)
{
    return value == ((value << 25) >> 25);
}

ALWAYS_INLINE bool isInt9(int32_t value)
{
    return value == ((value << 23) >> 23);
}

ALWAYS_INLINE bool isInt11(int32_t value)
{
    return value == ((value << 21) >> 21);
}

ALWAYS_INLINE bool isUInt5(int32_t value)
{
    return !(value & ~0x1f);
}

ALWAYS_INLINE bool isUInt12(int32_t value)
{
    return !(value & ~0xfff);
}

ALWAYS_INLINE bool isUInt12(intptr_t value)
{
    return !(value & ~0xfffL);
}

class UInt5 {
public:
    explicit UInt5(int value)
        : m_value(value)
    {
        ASSERT(isUInt5(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class UInt12 {
public:
    explicit UInt12(int value)
        : m_value(value)
    {
        ASSERT(isUInt12(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PostIndex {
public:
    explicit PostIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PreIndex {
public:
    explicit PreIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PairPostIndex {
public:
    explicit PairPostIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt11(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PairPreIndex {
public:
    explicit PairPreIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt11(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class LogicalImmediate {
public:
    static LogicalImmediate create32(uint32_t value)
    {
        // Check for 0, -1 - these cannot be encoded.
        if (!value || !~value)
            return InvalidLogicalImmediate;

        // First look for a 32-bit pattern, then for repeating 16-bit
        // patterns, 8-bit, 4-bit, and finally 2-bit.

        unsigned hsb, lsb;
        bool inverted;
        if (findBitRange<32>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<32>(hsb, lsb, inverted);

        if ((value & 0xffff) != (value >> 16))
            return InvalidLogicalImmediate;
        value &= 0xffff;

        if (findBitRange<16>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<16>(hsb, lsb, inverted);

        if ((value & 0xff) != (value >> 8))
            return InvalidLogicalImmediate;
        value &= 0xff;

        if (findBitRange<8>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<8>(hsb, lsb, inverted);

        if ((value & 0xf) != (value >> 4))
            return InvalidLogicalImmediate;
        value &= 0xf;

        if (findBitRange<4>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<4>(hsb, lsb, inverted);

        if ((value & 0x3) != (value >> 2))
            return InvalidLogicalImmediate;
        value &= 0x3;

        if (findBitRange<2>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<2>(hsb, lsb, inverted);

        return InvalidLogicalImmediate;
    }

    static LogicalImmediate create64(uint64_t value)
    {
        // Check for 0, -1 - these cannot be encoded.
        if (!value || !~value)
            return InvalidLogicalImmediate;

        // Look for a contiguous bit range.
        unsigned hsb, lsb;
        bool inverted;
        if (findBitRange<64>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<64>(hsb, lsb, inverted);

        // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
        if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
            return create32(static_cast<uint32_t>(value));
        return InvalidLogicalImmediate;
    }

    int value() const
    {
        ASSERT(isValid());
        return m_value;
    }

    bool isValid() const
    {
        return m_value != InvalidLogicalImmediate;
    }

    bool is64bit() const
    {
        return m_value & (1 << 12);
    }

private:
    LogicalImmediate(int value)
        : m_value(value)
    {
    }

    // Generate a mask with bits in the range hsb..0 set, for example:
    //   hsb:63 = 0xffffffffffffffff
    //   hsb:42 = 0x000007ffffffffff
    //   hsb: 0 = 0x0000000000000001
    static uint64_t mask(unsigned hsb)
    {
        ASSERT(hsb < 64);
        return 0xffffffffffffffffull >> (63 - hsb);
    }

    template<unsigned N>
    static void partialHSB(uint64_t& value, unsigned& result)
    {
        if (value & (0xffffffffffffffffull << N)) {
            result += N;
            value >>= N;
        }
    }

    // Find the bit number of the highest bit set in a non-zero value, for example:
    //   0x8080808080808080 = hsb:63
    //   0x0000000000000001 = hsb: 0
    //   0x000007ffffe00000 = hsb:42
    static unsigned highestSetBit(uint64_t value)
    {
        unsigned hsb = 0;
        partialHSB<32>(value, hsb);
        partialHSB<16>(value, hsb);
        partialHSB<8>(value, hsb);
        partialHSB<4>(value, hsb);
        partialHSB<2>(value, hsb);
        partialHSB<1>(value, hsb);
        return hsb;
    }

    // This function takes a value and a bit width, where value obeys the following constraints:
    //   * bits outside of the width of the value must be zero.
    //   * bits within the width of value must neither be all clear nor all set.
    // The input is inspected to detect values that consist of either two or three contiguous
    // ranges of bits. The output range hsb..lsb will describe the second range of the value.
    // if the range is set, inverted will be false, and if the range is clear, inverted will
    // be true. For example (with width 8):
    //   00001111 = hsb:3, lsb:0, inverted:false
    //   11110000 = hsb:3, lsb:0, inverted:true
    //   00111100 = hsb:5, lsb:2, inverted:false
    //   11000011 = hsb:5, lsb:2, inverted:true
    template<unsigned width>
    static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
    {
        ASSERT(value & mask(width - 1));
        ASSERT(value != mask(width - 1));
        ASSERT(!(value & ~mask(width - 1)));

        // Detect cases where the top bit is set; if so, flip all the bits & set invert.
        // This halves the number of patterns we need to look for.
        const uint64_t msb = 1ull << (width - 1);
        if ((inverted = (value & msb)))
            value ^= mask(width - 1);

        // Find the highest set bit in value, generate a corresponding mask & flip all
        // bits under it.
        hsb = highestSetBit(value);
        value ^= mask(hsb);
        if (!value) {
            // If this cleared the value, then the range hsb..0 was all set.
            lsb = 0;
            return true;
        }

        // Try making one more mask, and flipping the bits!
        lsb = highestSetBit(value);
        value ^= mask(lsb);
        if (!value) {
            // Success - but lsb actually points to the hsb of a third range - add one
            // to get to the lsb of the mid range.
            ++lsb;
            return true;
        }

        return false;
    }
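
    // Example of the three-range case described above (illustrative only):
    // findBitRange<8>(0b11000011, hsb, lsb, inverted) first sees the top bit
    // set, so it flips to 0b00111100 (inverted = true), finds hsb = 5, clears
    // bits 5..0 to leave 0b00000011, finds a second hsb of 1, and returns
    // hsb:5, lsb:2 - matching the last line of the comment table above.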

    // Encodes the set of immN:immr:imms fields found in a logical immediate.
    template<unsigned width>
    static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
    {
        // Check width is a power of 2!
        ASSERT(!(width & (width - 1)));
        ASSERT(width <= 64 && width >= 2);

        unsigned immN = 0;
        unsigned imms = 0;
        unsigned immr = 0;

        // For 64-bit values this is easy - just set immN to true, and imms just
        // contains the bit number of the highest set bit of the set range. For
        // values with narrower widths, these are encoded by a leading set of
        // one bits, followed by a zero bit, followed by the remaining set of bits
        // being the high bit of the range. For a 32-bit immediate there are no
        // leading one bits, just a zero followed by a five bit number. For a
        // 16-bit immediate there is one one bit, a zero bit, and then a four bit
        // bit-position, etc.
        if (width == 64)
            immN = 1;
        else
            imms = 63 & ~(width + width - 1);

        if (inverted) {
            // if width is 64 & hsb is 62, then we have a value something like:
            //   0x80000000ffffffff (in this case with lsb 32).
            // The ror should be by 1, imms (effectively set width minus 1) is
            // 32. Set width is full width minus cleared width.
            immr = (width - 1) - hsb;
            imms |= (width - ((hsb - lsb) + 1)) - 1;
        } else {
            // if width is 64 & hsb is 62, then we have a value something like:
            //   0x7fffffff00000000 (in this case with lsb 32).
            // The value is effectively rol'ed by lsb, which is equivalent to
            // a ror by width - lsb (or 0, in the case where lsb is 0). imms
            // is the set width, minus 1.
            immr = (width - lsb) & (width - 1);
            imms |= hsb - lsb;
        }

        return immN << 12 | immr << 6 | imms;
    }

    static const int InvalidLogicalImmediate = -1;

    int m_value;
};
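
// Layout sketch (added for illustration): the 13-bit value returned above is
//   bit 12    = immN (set only for a full 64-bit element, hence is64bit()),
//   bits 11-6 = immr (the rotate-right amount),
//   bits 5-0  = imms (element size and run length).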

inline uint16_t getHalfword(uint64_t value, int which)
{
    return value >> (which << 4);
}
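
// For example (illustrative): getHalfword(0x0123456789abcdefull, 0) == 0xcdef
// and getHalfword(0x0123456789abcdefull, 3) == 0x0123; the move-wide
// instructions (movz/movk/movn) consume a constant one such halfword at a time.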

namespace ARM64Registers {
typedef enum {
    // Parameter/result registers
    x0,
    x1,
    x2,
    x3,
    x4,
    x5,
    x6,
    x7,
    // Indirect result location register
    x8,
    // Temporary registers
    x9,
    x10,
    x11,
    x12,
    x13,
    x14,
    x15,
    // Intra-procedure-call scratch registers (temporary)
    x16,
    x17,
    // Platform Register (temporary)
    x18,
    // Callee-saved registers
    x19,
    x20,
    x21,
    x22,
    x23,
    x24,
    x25,
    x26,
    x27,
    x28,
    // Special registers
    fp,
    lr,
    sp,
    zr = 0x3f
} RegisterID;

typedef enum {
    // Parameter/result registers
    q0,
    q1,
    q2,
    q3,
    q4,
    q5,
    q6,
    q7,
    // Callee-saved (up to 64-bits only!)
    q8,
    q9,
    q10,
    q11,
    q12,
    q13,
    q14,
    q15,
    // Temporary registers
    q16,
    q17,
    q18,
    q19,
    q20,
    q21,
    q22,
    q23,
    q24,
    q25,
    q26,
    q27,
    q28,
    q29,
    q30,
    q31
} FPRegisterID;

static bool isSp(RegisterID reg) { return reg == sp; }
static bool isZr(RegisterID reg) { return reg == zr; }
}

class ARM64Assembler {
public:
    typedef ARM64Registers::RegisterID RegisterID;
    typedef ARM64Registers::FPRegisterID FPRegisterID;

    static RegisterID firstRegister() { return ARM64Registers::x0; }
    static RegisterID lastRegister() { return ARM64Registers::sp; }

    static FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
    static FPRegisterID lastFPRegister() { return ARM64Registers::q31; }

private:
    static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
    static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }

public:
    ARM64Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }

    AssemblerBuffer& buffer() { return m_buffer; }

    // (HS, LO, HI, LS) -> (AE, B, A, BE)
    // (VS, VC) -> (O, NO)
    typedef enum {
        ConditionEQ,
        ConditionNE,
        ConditionHS, ConditionCS = ConditionHS,
        ConditionLO, ConditionCC = ConditionLO,
        ConditionMI,
        ConditionPL,
        ConditionVS,
        ConditionVC,
        ConditionHI,
        ConditionLS,
        ConditionGE,
        ConditionLT,
        ConditionGT,
        ConditionLE,
        ConditionAL,
        ConditionInvalid
    } Condition;

    static Condition invert(Condition cond)
    {
        return static_cast<Condition>(cond ^ 1);
    }
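
    // The XOR trick works because ARM64 condition codes come in adjacent
    // true/false pairs; for instance (illustrative):
    //   invert(ConditionEQ) == ConditionNE, invert(ConditionHS) == ConditionLO,
    //   invert(ConditionGE) == ConditionLT.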

    typedef enum {
        LSL,
        LSR,
        ASR,
        ROR
    } ShiftType;

    typedef enum {
        UXTB,
        UXTH,
        UXTW,
        UXTX,
        SXTB,
        SXTH,
        SXTW,
        SXTX
    } ExtendType;

#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
        JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
        JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
        JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
    };
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
        LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
    };

    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
        }
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
            data.realTypes.m_is64Bit = is64Bit;
            data.realTypes.m_compareRegister = compareRegister;
        }
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
            data.realTypes.m_bitNumber = bitNumber;
            data.realTypes.m_compareRegister = compareRegister;
        }
        void operator=(const LinkRecord& other)
        {
            data.copyTypes.content[0] = other.data.copyTypes.content[0];
            data.copyTypes.content[1] = other.data.copyTypes.content[1];
            data.copyTypes.content[2] = other.data.copyTypes.content[2];
        }
        intptr_t from() const { return data.realTypes.m_from; }
        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
        intptr_t to() const { return data.realTypes.m_to; }
        JumpType type() const { return data.realTypes.m_type; }
        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
        Condition condition() const { return data.realTypes.m_condition; }
        bool is64Bit() const { return data.realTypes.m_is64Bit; }
        unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
        RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }

    private:
        union {
            struct RealTypes {
                intptr_t m_from : 48;
                intptr_t m_to : 48;
                JumpType m_type : 8;
                JumpLinkType m_linkType : 8;
                Condition m_condition : 4;
                bool m_is64Bit : 1;
                unsigned m_bitNumber : 6;
                RegisterID m_compareRegister : 5;
            } realTypes;
            struct CopyTypes {
                uint64_t content[3];
            } copyTypes;
            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
        } data;
    };

    // bits(N) VFPExpandImm(bits(8) imm8);
    //
    // Encoding of floating point immediates is a little complicated. Here's a
    // high level description:
    //     +/-m*2^-n where m and n are integers, 16 <= m <= 31, 0 <= n <= 7
    // and the algorithm for expanding to a single precision float:
    //     return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
    //
    // The trickiest bit is how the exponent is handled. The following table
    // may help clarify things a little:
    //     654
    //     100 01111100 124 -3 1020 01111111100
    //     101 01111101 125 -2 1021 01111111101
    //     110 01111110 126 -1 1022 01111111110
    //     111 01111111 127  0 1023 01111111111
    //     000 10000000 128  1 1024 10000000000
    //     001 10000001 129  2 1025 10000000001
    //     010 10000010 130  3 1026 10000000010
    //     011 10000011 131  4 1027 10000000011
    // The first column shows the bit pattern stored in bits 6-4 of the arm
    // encoded immediate. The second column shows the 8-bit IEEE 754 single
    // -precision exponent in binary, the third column shows the raw decimal
    // value. IEEE 754 single-precision numbers are stored with a bias of 127
    // to the exponent, so the fourth column shows the resulting exponent.
    // From this we can see that the exponent can be in the range -3..4,
    // which agrees with the high level description given above. The fifth
    // and sixth columns show the value stored in an IEEE 754 double-precision
    // number to represent these exponents in decimal and binary, given the
    // bias of 1023.
    //
    // Ultimately, detecting doubles that can be encoded as immediates on arm
    // and encoding doubles is actually not too bad. A floating point value can
    // be encoded by retaining the sign bit, the low three bits of the exponent
    // and the high 4 bits of the mantissa. To validly be able to encode an
    // immediate the remainder of the mantissa must be zero, and the high part
    // of the exponent must match the top bit retained, bar the highest bit
    // which must be its inverse.
    static bool canEncodeFPImm(double d)
    {
        // Discard the sign bit, the low two bits of the exponent & the highest
        // four bits of the mantissa.
        uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
        return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
    }

    template<int datasize>
    static bool canEncodePImmOffset(int32_t offset)
    {
        int32_t maxPImm = 4095 * (datasize / 8);
        if (offset < 0)
            return false;
        if (offset > maxPImm)
            return false;
        if (offset & ((datasize / 8) - 1))
            return false;
        return true;
    }

    static bool canEncodeSImmOffset(int32_t offset)
    {
        return isInt9(offset);
    }

private:
    int encodeFPImm(double d)
    {
        ASSERT(canEncodeFPImm(d));
        uint64_t u64 = bitwise_cast<uint64_t>(d);
        return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
    }
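
    // Example (illustrative): for d == 1.0, the top sixteen bits of the double
    // are 0x3ff0, so the returned imm8 is 0x70 - sign 0, exponent bits 111,
    // mantissa bits 0000.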

    template<int datasize>
    int encodeShiftAmount(int amount)
    {
        ASSERT(!amount || datasize == (8 << amount));
        return amount;
    }

    template<int datasize>
    static int encodePositiveImmediate(unsigned pimm)
    {
        ASSERT(!(pimm & ((datasize / 8) - 1)));
        return pimm / (datasize / 8);
    }

    enum Datasize {
        Datasize_32,
        Datasize_64,
        Datasize_64_top,
        Datasize_16
    };

    enum MemOpSize {
        MemOpSize_8_or_128,
        MemOpSize_16,
        MemOpSize_32,
        MemOpSize_64
    };

    enum BranchType {
        BranchType_JMP,
        BranchType_CALL,
        BranchType_RET
    };

    enum AddOp {
        AddOp_ADD,
        AddOp_SUB
    };

    enum BitfieldOp {
        BitfieldOp_SBFM,
        BitfieldOp_BFM,
        BitfieldOp_UBFM
    };

    enum LogicalOp {
        LogicalOp_AND,
        LogicalOp_ORR,
        LogicalOp_EOR,
        LogicalOp_ANDS
    };

    enum DataOp1Source {
        DataOp_RBIT,
        DataOp_REV16,
        DataOp_REV32,
        DataOp_REV64,
        DataOp_CLZ,
        DataOp_CLS
    };

    enum DataOp2Source {
        DataOp_UDIV = 2,
        DataOp_SDIV = 3,
        DataOp_LSLV = 8,
        DataOp_LSRV = 9,
        DataOp_ASRV = 10,
        DataOp_RORV = 11
    };

    enum DataOp3Source {
        DataOp_MADD = 0,
        DataOp_MSUB = 1,
        DataOp_SMADDL = 2,
        DataOp_SMSUBL = 3,
        DataOp_SMULH = 4,
        DataOp_UMADDL = 10,
        DataOp_UMSUBL = 11,
        DataOp_UMULH = 12
    };

    enum ExcepnOp {
        ExcepnOp_EXCEPTION = 0,
        ExcepnOp_BREAKPOINT = 1,
        ExcepnOp_HALT = 2
    };

    enum FPCmpOp {
        FPCmpOp_FCMP = 0x00,
        FPCmpOp_FCMP0 = 0x08,
        FPCmpOp_FCMPE = 0x10,
        FPCmpOp_FCMPE0 = 0x18
    };

    enum FPCondCmpOp {
        FPCondCmpOp_FCMP,
        FPCondCmpOp_FCMPE
    };

    enum FPDataOp1Source {
        FPDataOp_FMOV = 0,
        FPDataOp_FABS = 1,
        FPDataOp_FNEG = 2,
        FPDataOp_FSQRT = 3,
        FPDataOp_FCVT_toSingle = 4,
        FPDataOp_FCVT_toDouble = 5,
        FPDataOp_FCVT_toHalf = 7,
        FPDataOp_FRINTN = 8,
        FPDataOp_FRINTP = 9,
        FPDataOp_FRINTM = 10,
        FPDataOp_FRINTZ = 11,
        FPDataOp_FRINTA = 12,
        FPDataOp_FRINTX = 14,
        FPDataOp_FRINTI = 15
    };

    enum FPDataOp2Source {
        FPDataOp_FMUL,
        FPDataOp_FDIV,
        FPDataOp_FADD,
        FPDataOp_FSUB,
        FPDataOp_FMAX,
        FPDataOp_FMIN,
        FPDataOp_FMAXNM,
        FPDataOp_FMINNM,
        FPDataOp_FNMUL
    };

    enum FPIntConvOp {
        FPIntConvOp_FCVTNS = 0x00,
        FPIntConvOp_FCVTNU = 0x01,
        FPIntConvOp_SCVTF = 0x02,
        FPIntConvOp_UCVTF = 0x03,
        FPIntConvOp_FCVTAS = 0x04,
        FPIntConvOp_FCVTAU = 0x05,
        FPIntConvOp_FMOV_QtoX = 0x06,
        FPIntConvOp_FMOV_XtoQ = 0x07,
        FPIntConvOp_FCVTPS = 0x08,
        FPIntConvOp_FCVTPU = 0x09,
        FPIntConvOp_FMOV_QtoX_top = 0x0e,
        FPIntConvOp_FMOV_XtoQ_top = 0x0f,
        FPIntConvOp_FCVTMS = 0x10,
        FPIntConvOp_FCVTMU = 0x11,
        FPIntConvOp_FCVTZS = 0x18,
        FPIntConvOp_FCVTZU = 0x19
    };

    enum MemOp {
        MemOp_STORE = 0,
        MemOp_LOAD = 1,
        MemOp_PREFETCH = 2, // size must be 3
        MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
        MemOp_LOAD_signed32 = 3 // size may be 0 or 1
    };

    enum MemPairOpSize {
        MemPairOp_32 = 0,
        MemPairOp_LoadSigned_32 = 1,
        MemPairOp_64 = 2,

        MemPairOp_V32 = MemPairOp_32,
        MemPairOp_V64 = 1,
        MemPairOp_V128 = 2
    };

    enum MoveWideOp {
        MoveWideOp_N = 0,
        MoveWideOp_Z = 2,
        MoveWideOp_K = 3
    };

    enum LdrLiteralOp {
        LdrLiteralOp_32BIT = 0,
        LdrLiteralOp_64BIT = 1,
        LdrLiteralOp_LDRSW = 2,
        LdrLiteralOp_128BIT = 2
    };

    static unsigned memPairOffsetShift(bool V, MemPairOpSize size)
    {
        // return the log2 of the size in bytes, e.g. 64 bit size returns 3
        if (V)
            return size + 2;
        return (size >> 1) + 2;
    }
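
    // For example (illustrative): memPairOffsetShift(false, MemPairOp_64) == 3
    // (8-byte integer pairs) and memPairOffsetShift(true, MemPairOp_V128) == 4
    // (16-byte vector pairs); the 7-bit pair offset is scaled by 1 << shift.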

public:
    enum SetFlags {
        DontSetFlags,
        S
    };

    // Integer Instructions:

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        add<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        if (isSp(rd) || isSp(rn)) {
            ASSERT(shift == LSL);
            ASSERT(!isSp(rm));
            add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
        } else
            insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
    }

    ALWAYS_INLINE void adr(RegisterID rd, int offset)
    {
        insn(pcRelative(false, offset, rd));
    }

    ALWAYS_INLINE void adrp(RegisterID rd, int offset)
    {
        ASSERT(!(offset & 0xfff));
        insn(pcRelative(true, offset >> 12, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        and_<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        sbfm<datasize>(rd, rn, shift, datasize - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        asrv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
    }

    ALWAYS_INLINE void b(int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        ASSERT(offset == (offset << 6) >> 6);
        insn(unconditionalBranchImmediate(false, offset));
    }

    ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        ASSERT(offset == (offset << 13) >> 13);
        insn(conditionalBranchImmediate(offset, cond));
    }

    template<int datasize>
    ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        bfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        bic<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd));
    }

    ALWAYS_INLINE void bl(int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(unconditionalBranchImmediate(true, offset));
    }

    ALWAYS_INLINE void blr(RegisterID rn)
    {
        insn(unconditionalBranchRegister(BranchType_CALL, rn));
    }

    ALWAYS_INLINE void br(RegisterID rn)
    {
        insn(unconditionalBranchRegister(BranchType_JMP, rn));
    }

    ALWAYS_INLINE void brk(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0));
    }

    template<int datasize>
    ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(compareAndBranchImmediate(DATASIZE, true, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(compareAndBranchImmediate(DATASIZE, false, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond)
    {
        csinc<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond)
    {
        csinv<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        add<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond)
    {
        csneg<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void cset(RegisterID rd, Condition cond)
    {
        csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csetm(RegisterID rd, Condition cond)
    {
        csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eon<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eor<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb)
    {
        CHECK_DATASIZE();
        insn(extract(DATASIZE, rm, lsb, rn, rd));
    }

    ALWAYS_INLINE void hint(int imm)
    {
        insn(hintPseudo(imm));
    }

    ALWAYS_INLINE void hlt(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
    }

    template<int datasize>
    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldr<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of ldrb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrh(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        // Not calling the 5 argument form of ldrsb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsh<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsw(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 2);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0)
    {
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift);
    }

    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lslv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, shift, datasize - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lsrv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        msub<datasize>(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        if (isSp(rd) || isSp(rm))
            add<datasize>(rd, rm, UInt12(0));
        else
            orr<datasize>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm)
    {
        orr<datasize>(rd, ARM64Registers::zr, imm);
    }

    template<int datasize>
    ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        madd<datasize>(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    ALWAYS_INLINE void nop()
    {
        insn(nopPseudo());
    }

    static void fillNops(void* base, size_t size)
    {
        RELEASE_ASSERT(!(size % sizeof(int32_t)));
        size_t n = size / sizeof(int32_t);
        for (int32_t* ptr = static_cast<int32_t*>(base); n--;)
            *ptr++ = nopPseudo();
    }
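
    // Usage sketch (illustrative): fillNops(start, 16) writes four NOP
    // instructions; callers are expected to pass a size that is a multiple of
    // the 4-byte instruction width, as the RELEASE_ASSERT above enforces.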

    ALWAYS_INLINE void dmbSY()
    {
        insn(0xd5033fbf);
    }

    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orn<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orr<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd));
    }

    ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr)
    {
        insn(unconditionalBranchRegister(BranchType_RET, rn));
    }

    template<int datasize>
    ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width.
            insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd));
        else
            insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn)
    {
        ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands.
        insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        rorv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift)
    {
        extr<datasize>(rd, rs, rs, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd));
    }

    ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smsubl(rd, rn, rm, ARM64Registers::zr);
    }

    ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd));
    }

    ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smaddl(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        str<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of strb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        strh(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        if (isSp(rd) || isSp(rn)) {
            ASSERT(shift == LSL);
            ASSERT(!isSp(rm));
            sub<datasize, setFlags>(rd, rn, rm, UXTX, amount);
        } else
            insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 7);
    }

    template<int datasize>
    ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 15);
    }

    ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
    {
        sbfm<64>(rd, rn, 0, 31);
    }

    ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(false, imm, offset, rt));
    }

    ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(true, imm, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, imm);
    }

    template<int datasize>
    ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
    }

    ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umsubl(rd, rn, rm, ARM64Registers::zr);
    }

    ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
    }

    ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umaddl(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 7);
    }

    template<int datasize>
    ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 15);
    }

    ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
    {
        ubfm<64>(rd, rn, 0, 31);
    }

    // Floating Point Instructions:

    template<int datasize>
    ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0));
    }

    template<int datasize>
    ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
    }
2165 template<int dstsize
, int srcsize
>
2166 ALWAYS_INLINE
void fcvt(FPRegisterID vd
, FPRegisterID vn
)
2168 ASSERT(dstsize
== 16 || dstsize
== 32 || dstsize
== 64);
2169 ASSERT(srcsize
== 16 || srcsize
== 32 || srcsize
== 64);
2170 ASSERT(dstsize
!= srcsize
);
2171 Datasize type
= (srcsize
== 64) ? Datasize_64
: (srcsize
== 32) ? Datasize_32
: Datasize_16
;
2172 FPDataOp1Source opcode
= (dstsize
== 64) ? FPDataOp_FCVT_toDouble
: (dstsize
== 32) ? FPDataOp_FCVT_toSingle
: FPDataOp_FCVT_toHalf
;
2173 insn(floatingPointDataProcessing1Source(type
, opcode
, vn
, vd
));
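    // The 'type' field of an FCVT is derived from the source size and the opcode
    // from the destination size, so (with illustrative template arguments)
    // fcvt<64, 32> emits a single-to-double convert and fcvt<16, 64> a
    // double-to-half convert.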
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
    }
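    // The fcvt{a,m,n,p,z}{s,u} family above differs only in rounding mode and
    // signedness: A = to nearest, ties away from zero; M = toward -infinity;
    // N = to nearest, ties to even; P = toward +infinity; Z = toward zero; the
    // trailing s/u selects a signed or unsigned integer result.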
    template<int datasize>
    ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
    {
        CHECK_DATASIZE();
        insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
    }

    ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
    {
        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
    }

    ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
    {
        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldr<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
    {
        CHECK_FP_MEMOP_DATASIZE();
        ASSERT(datasize >= 32);
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
    }
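    // ldr_literal emits a PC-relative literal load: the byte offset must be
    // word-aligned (hence the "offset & 3" assert) and is stored as a scaled
    // imm19, which is why it is shifted right by two before encoding.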
    template<int datasize>
    ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
    {
        str<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
    }
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_buffer.label();
    }

    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_buffer.label();
        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.m_offset;
        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
        return result;
    }

    AssemblerLabel label()
    {
        AssemblerLabel result = m_buffer.label();
        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
            nop();
            result = m_buffer.label();
        }
        return result;
    }

    AssemblerLabel align(int alignment)
    {
        ASSERT(!(alignment & 3));
        while (!m_buffer.isAligned(alignment))
            brk(0);
        return label();
    }

    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }

    void* unlinkedCode() { return m_buffer.data(); }
    size_t codeSize() const { return m_buffer.codeSize(); }

    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
    }

    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
    }

    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
    }

    void linkJump(AssemblerLabel from, AssemblerLabel to)
    {
        ASSERT(from.isSet());
        ASSERT(to.isSet());
        relinkJumpOrCall<false>(addressOf(from), addressOf(to));
    }

    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());
        relinkJumpOrCall<false>(addressOf(code, from), to);
    }

    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());
        linkJumpOrCall<true>(addressOf(code, from) - 1, to);
    }

    static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
    {
        linkPointer(addressOf(code, where), valuePtr);
    }

    static void replaceWithJump(void* where, void* to)
    {
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);
        *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset));
        cacheFlush(where, sizeof(int));
    }
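    // The unconditional B written here carries a signed 26-bit word offset, so a
    // replacement jump can reach roughly +/-128MB from 'where'; the ASSERT above
    // checks that the displacement survives the narrowing to int.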
    static ptrdiff_t maxJumpReplacementSize()
    {
        return 4;
    }

    static void replaceWithLoad(void* where)
    {
        Datasize sf;
        AddOp op;
        SetFlags S;
        int shift;
        int imm12;
        RegisterID rn;
        RegisterID rd;
        if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
            ASSERT(sf == Datasize_64);
            ASSERT(op == AddOp_ADD);
            ASSERT(!S);
            ASSERT(!shift);
            ASSERT(!(imm12 & ~0xff8));
            *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
            cacheFlush(where, sizeof(int));
        }
#if !ASSERT_DISABLED
        else {
            MemOpSize size;
            bool V;
            MemOp opc;
            int imm12;
            RegisterID rn;
            RegisterID rt;
            ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
            ASSERT(size == MemOpSize_64);
            ASSERT(!V);
            ASSERT(opc == MemOp_LOAD);
            ASSERT(!(imm12 & ~0x1ff));
        }
#endif
    }

    static void replaceWithAddressComputation(void* where)
    {
        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
            ASSERT(size == MemOpSize_64);
            ASSERT(!V);
            ASSERT(opc == MemOp_LOAD);
            ASSERT(!(imm12 & ~0x1ff));
            *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
            cacheFlush(where, sizeof(int));
        }
#if !ASSERT_DISABLED
        else {
            Datasize sf;
            AddOp op;
            SetFlags S;
            int shift;
            int imm12;
            RegisterID rn;
            RegisterID rd;
            ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
            ASSERT(sf == Datasize_64);
            ASSERT(op == AddOp_ADD);
            ASSERT(!S);
            ASSERT(!shift);
            ASSERT(!(imm12 & ~0xff8));
        }
#endif
    }
    static void repatchPointer(void* where, void* valuePtr)
    {
        linkPointer(static_cast<int*>(where), valuePtr, true);
    }

    static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
    {
        uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
        address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
        address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);

        if (flush)
            cacheFlush(address, sizeof(int) * 3);
    }
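    // A pointer is materialized as a movz/movk/movk triple, one 16-bit halfword
    // per instruction. For a hypothetical valuePtr of 0x000012345678abcd this
    // writes, in effect:
    //     movz rd, #0xabcd
    //     movk rd, #0x5678, lsl #16
    //     movk rd, #0x1234, lsl #32
    // Three halfwords are enough because only the low 48 bits of the pointer are
    // expected to be significant.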
    static void repatchInt32(void* where, int32_t value)
    {
        int* address = static_cast<int*>(where);

        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;

        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
        ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));

        if (value >= 0) {
            address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        } else {
            address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        }

        cacheFlush(where, sizeof(int) * 2);
    }
    static void* readPointer(void* where)
    {
        int* address = static_cast<int*>(where);

        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rdFirst, rd;

        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
        uintptr_t result = imm16;

        expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
        result |= static_cast<uintptr_t>(imm16) << 16;

        expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
        result |= static_cast<uintptr_t>(imm16) << 32;

        return reinterpret_cast<void*>(result);
    }
    static void* readCallTarget(void* from)
    {
        return readPointer(reinterpret_cast<int*>(from) - 4);
    }

    static void relinkJump(void* from, void* to)
    {
        relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
        cacheFlush(from, sizeof(int));
    }

    static void relinkCall(void* from, void* to)
    {
        relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to);
        cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
    }
    static void repatchCompact(void* where, int32_t value)
    {
        ASSERT(!(value & ~0x3ff8));

        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
        ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.

        if (size == MemOpSize_32)
            imm12 = encodePositiveImmediate<32>(value);
        else
            imm12 = encodePositiveImmediate<64>(value);
        *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);

        cacheFlush(where, sizeof(int));
    }

    unsigned debugOffset() { return m_buffer.debugOffset(); }
#if OS(LINUX) && COMPILER(GCC)
    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
    {
        __builtin___clear_cache(reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end));
    }
#endif

    static void cacheFlush(void* code, size_t size)
    {
#if OS(IOS)
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#elif OS(LINUX)
        size_t page = pageSize();
        uintptr_t current = reinterpret_cast<uintptr_t>(code);
        uintptr_t end = current + size;
        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;

        if (end <= firstPageEnd) {
            linuxPageFlush(current, end);
            return;
        }

        linuxPageFlush(current, firstPageEnd);

        for (current = firstPageEnd; current + page < end; current += page)
            linuxPageFlush(current, current + page);

        linuxPageFlush(current, end);
#else
#error "The cacheFlush support is missing on this platform."
#endif
    }
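    // On Linux, cacheFlush above walks the range one page at a time: first the
    // fragment up to the first page boundary, then any whole pages, then the
    // tail, so __builtin___clear_cache is invoked once per page-sized chunk
    // rather than once over the entire range.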
    // Assembler admin methods:

    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }

    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }

    static bool canCompact(JumpType jumpType)
    {
        // Fixed jumps cannot be compacted
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
    }

    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        switch (jumpType) {
        case JumpFixed:
            return LinkInvalid;
        case JumpNoConditionFixedSize:
            return LinkJumpNoCondition;
        case JumpConditionFixedSize:
            return LinkJumpCondition;
        case JumpCompareAndBranchFixedSize:
            return LinkJumpCompareAndBranch;
        case JumpTestBitFixedSize:
            return LinkJumpTestBit;
        case JumpNoCondition:
            return LinkJumpNoCondition;
        case JumpCondition: {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            if (((relative << 43) >> 43) == relative)
                return LinkJumpConditionDirect;

            return LinkJumpCondition;
        }
        case JumpCompareAndBranch: {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            if (((relative << 43) >> 43) == relative)
                return LinkJumpCompareAndBranchDirect;

            return LinkJumpCompareAndBranch;
        }
        case JumpTestBit: {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            if (((relative << 50) >> 50) == relative)
                return LinkJumpTestBitDirect;

            return LinkJumpTestBit;
        }
        default:
            ASSERT_NOT_REACHED();
        }

        return LinkJumpNoCondition;
    }
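    // A jump gets a "Direct" link type when the byte displacement already fits
    // the branch's own immediate field: 21 signed bits for b.cond/cbz/cbnz (the
    // "(relative << 43) >> 43" check) and 14 for tbz/tbnz. Anything further away
    // keeps its padding slot and is linked as an inverted branch over an
    // unconditional B by the link* helpers below.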
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }

    static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpNoCondition:
            linkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpConditionDirect:
            linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpCondition:
            linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to);
            break;
        case LinkJumpCompareAndBranchDirect:
            linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpCompareAndBranch:
            linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
            break;
        case LinkJumpTestBitDirect:
            linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpTestBit:
            linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
            break;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }
private:
    template<Datasize size>
    static bool checkMovk(int insn, int _hw, RegisterID _rd)
    {
        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);

        return expected
            && sf == size
            && opc == MoveWideOp_K
            && hw == _hw
            && rd == _rd;
    }

    static void linkPointer(int* address, void* valuePtr, bool flush = false)
    {
        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
        ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
        ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));

        setPointer(address, valuePtr, rd, flush);
    }

    template<bool isCall>
    static void linkJumpOrCall(int* from, void* to)
    {
        bool link;
        int imm26;
        bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);

        ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
        ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);

        *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
    }
    template<bool isDirect>
    static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
            linkJumpOrCall<false>(from + 1, to);
        }
    }

    template<bool isDirect>
    static void linkConditionalBranch(Condition condition, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = conditionalBranchImmediate(static_cast<int>(offset), condition);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = conditionalBranchImmediate(2, invert(condition));
            linkJumpOrCall<false>(from + 1, to);
        }
    }

    template<bool isDirect>
    static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
            linkJumpOrCall<false>(from + 1, to);
        }
    }
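    // All three linkers above share the same fallback: when the target is out of
    // range for the short form, they emit the branch with its condition inverted
    // and a literal offset of 2 (skipping over the next instruction), followed by
    // an unconditional B whose +/-128MB range is ample.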
    template<bool isCall>
    static void relinkJumpOrCall(int* from, void* to)
    {
        if (!isCall && disassembleNop(from)) {
            unsigned op01;
            int imm19;
            Condition condition;
            bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);

            if (isConditionalBranchImmediate) {
                ASSERT_UNUSED(op01, !op01);
                ASSERT_UNUSED(isCall, !isCall);

                if (imm19 == 8)
                    condition = invert(condition);

                linkConditionalBranch<false>(condition, from - 1, to);
                return;
            }

            Datasize opSize;
            bool op;
            RegisterID rt;
            bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);

            if (isCompareAndBranchImmediate) {
                if (imm19 == 8)
                    op = !op;

                linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to);
                return;
            }

            unsigned bitNumber;
            int imm14;
            bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);

            if (isTestAndBranchImmediate) {
                if (imm14 == 8)
                    op = !op;

                linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to);
                return;
            }
        }

        linkJumpOrCall<isCall>(from, to);
    }
    static int* addressOf(void* code, AssemblerLabel label)
    {
        return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
    }

    int* addressOf(AssemblerLabel label)
    {
        return addressOf(m_buffer.data(), label);
    }

    static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
    static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
    static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
    static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
    {
        int insn = *static_cast<int*>(address);
        sf = static_cast<Datasize>((insn >> 31) & 1);
        op = static_cast<AddOp>((insn >> 30) & 1);
        S = static_cast<SetFlags>((insn >> 29) & 1);
        shift = (insn >> 22) & 3;
        imm12 = (insn >> 10) & 0xfff;
        rn = disassembleXOrSp((insn >> 5) & 0x1f);
        rd = disassembleXOrZrOrSp(S, insn & 0x1f);
        return (insn & 0x1f000000) == 0x11000000;
    }

    static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
    {
        int insn = *static_cast<int*>(address);
        size = static_cast<MemOpSize>((insn >> 30) & 3);
        V = (insn >> 26) & 1;
        opc = static_cast<MemOp>((insn >> 22) & 3);
        imm12 = (insn >> 10) & 0xfff;
        rn = disassembleXOrSp((insn >> 5) & 0x1f);
        rt = disassembleXOrZr(insn & 0x1f);
        return (insn & 0x3b000000) == 0x39000000;
    }
    static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
    {
        int insn = *static_cast<int*>(address);
        sf = static_cast<Datasize>((insn >> 31) & 1);
        opc = static_cast<MoveWideOp>((insn >> 29) & 3);
        hw = (insn >> 21) & 3;
        imm16 = insn >> 5;
        rd = disassembleXOrZr(insn & 0x1f);
        return (insn & 0x1f800000) == 0x12800000;
    }

    static bool disassembleNop(void* address)
    {
        unsigned insn = *static_cast<unsigned*>(address);
        return insn == 0xd503201f;
    }

    static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
    {
        int insn = *static_cast<int*>(address);
        sf = static_cast<Datasize>((insn >> 31) & 1);
        op = (insn >> 24) & 0x1;
        imm19 = (insn << 8) >> 13;
        rt = static_cast<RegisterID>(insn & 0x1f);
        return (insn & 0x7e000000) == 0x34000000;
    }
    static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition& condition)
    {
        int insn = *static_cast<int*>(address);
        op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
        imm19 = (insn << 8) >> 13;
        condition = static_cast<Condition>(insn & 0xf);
        return (insn & 0xfe000000) == 0x54000000;
    }

    static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
    {
        int insn = *static_cast<int*>(address);
        op = (insn >> 24) & 0x1;
        imm14 = (insn << 13) >> 18;
        bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
        rt = static_cast<RegisterID>(insn & 0x1f);
        return (insn & 0x7e000000) == 0x36000000;
    }

    static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
    {
        int insn = *static_cast<int*>(address);
        op = (insn >> 31) & 1;
        imm26 = (insn << 6) >> 6;
        return (insn & 0x7c000000) == 0x14000000;
    }
    static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; }
    static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; }
    static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
    static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }
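    // Register number 31 is overloaded in the A64 encoding: depending on the
    // instruction it means either sp or zr. The helpers above assert that the
    // caller passed the interpretation the instruction being encoded accepts.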
    ALWAYS_INLINE void insn(int instruction)
    {
        m_buffer.putInt(instruction);
    }

    ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
    {
        ASSERT(imm3 < 5);
        // The only allocated value for opt is 0.
        const int opt = 0;
        return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
    }
    ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
    {
        ASSERT(!shift || shift == 1);
        ASSERT(isUInt12(imm12));
        return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
    }

    ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
    {
        ASSERT(shift < 3);
        ASSERT(!(imm6 & (sf ? ~63 : ~31)));
        return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
    {
        const int opcode2 = 0;
        return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
    {
        ASSERT(immr < (sf ? 64 : 32));
        ASSERT(imms < (sf ? 64 : 32));
        const int N = sf;
        return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    // 'op' means negate
    ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
    {
        ASSERT(imm19 == (imm19 << 13) >> 13);
        return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
    }

    ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
    {
        ASSERT(imm19 == (imm19 << 13) >> 13);
        ASSERT(!(cond & ~15));
        // The only allocated values for o1 & o0 are 0.
        const int o1 = 0;
        const int o0 = 0;
        return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
    }
    ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
    {
        ASSERT(!(imm5 & ~0x1f));
        const int S = 1;
        const int o2 = 0;
        const int o3 = 0;
        return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
    }

    ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
    {
        const int S = 1;
        const int o2 = 0;
        const int o3 = 0;
        return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
    }

    // 'op' means negate
    // 'op2' means increment
    ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        const int opcode2 = 0;
        return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
    {
        int op54 = opcode >> 4;
        int op31 = (opcode >> 1) & 7;
        int op0 = opcode & 1;
        return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
    {
        ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
        const int op2 = 0;
        return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
    }

    ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
    {
        ASSERT(imms < (sf ? 64 : 32));
        const int op21 = 0;
        const int N = sf;
        const int o0 = 0;
        return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2)
    {
        const int M = 0;
        const int S = 0;
        const int op = 0;
        return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2);
    }

    ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv)
    {
        const int M = 0;
        const int S = 0;
        return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv);
    }

    ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd);
    }

    ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        const int imm5 = 0;
        return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
    }

    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
    {
        const int S = 0;
        return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
    }

    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
    {
        return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
    }

    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
    {
        return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
    }

    ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
    }

    ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
    }

    // 'o1' means negate
    ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd);
    }
    ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, FPRegisterID rt)
    {
        ASSERT(((imm19 << 13) >> 13) == imm19);
        return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt);
    }

    ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt)
    {
        return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt));
    }

    ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(isInt9(imm9));
        return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
    {
        return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
    }

    ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
    {
        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
        unsigned immedShiftAmount = memPairOffsetShift(V, size);
        int imm7 = immediate >> immedShiftAmount;
        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
        return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
    {
        return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
    }

    ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(isInt9(imm9));
        return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
    {
        return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
    }

    ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
    {
        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
        unsigned immedShiftAmount = memPairOffsetShift(V, size);
        int imm7 = immediate >> immedShiftAmount;
        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
        return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
    {
        return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
    }
    // 'S' means shift rm
    ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit.
        return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt)
    {
        return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt));
    }

    ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(isInt9(imm9));
        return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
    {
        ASSERT(isInt9(imm9));
        return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
    }

    ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(isUInt12(imm12));
        return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt)
    {
        return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt));
    }
    ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd)
    {
        ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff)));
        return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd));
    }

    // 'N' means negate rm
    ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
    {
        ASSERT(!(imm6 & (sf ? ~63 : ~31)));
        return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd)
    {
        ASSERT(hw < (sf ? 4 : 2));
        return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd));
    }
    // 'op' means link
    ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26)
    {
        ASSERT(imm26 == (imm26 << 6) >> 6);
        return (0x14000000 | op << 31 | (imm26 & 0x3ffffff));
    }

    // 'op' means page
    ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd)
    {
        ASSERT(imm21 == (imm21 << 11) >> 11);
        int32_t immlo = imm21 & 3;
        int32_t immhi = (imm21 >> 2) & 0x7ffff;
        return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt)
    {
        return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt));
    }

    ALWAYS_INLINE static int hintPseudo(int imm)
    {
        ASSERT(!(imm & ~0x7f));
        return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr);
    }

    ALWAYS_INLINE static int nopPseudo()
    {
        return hintPseudo(0);
    }

    // 'op' means negate
    ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt)
    {
        ASSERT(!(b50 & ~0x3f));
        ASSERT(imm14 == (imm14 << 18) >> 18);
        int b5 = b50 >> 5;
        int b40 = b50 & 0x1f;
        return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt));
    }
    ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn)
    {
        // The only allocated value for op2 is 0x1f; the only allocated values for op3 & op4 are 0.
        const int op2 = 0x1f;
        const int op3 = 0;
        const int op4 = 0;
        return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
    }
    AssemblerBuffer m_buffer;
    Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
    int m_indexOfLastWatchpoint;
    int m_indexOfTailOfLastWatchpoint;
};

} // namespace JSC

#undef CHECK_DATASIZE_OF
#undef DATASIZE_OF
#undef MEMOPSIZE_OF
#undef CHECK_DATASIZE
#undef DATASIZE
#undef MEMOPSIZE
#undef CHECK_FP_MEMOP_DATASIZE

#endif // ENABLE(ASSEMBLER) && CPU(ARM64)

#endif // ARM64Assembler_h