/*
 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef ARM64Assembler_h
#define ARM64Assembler_h

#if ENABLE(ASSEMBLER) && CPU(ARM64)

#include "AssemblerBuffer.h"
#include <limits.h>
#include <wtf/Assertions.h>
#include <wtf/Vector.h>

#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64)
#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
#define DATASIZE DATASIZE_OF(datasize)
#define MEMOPSIZE MEMOPSIZE_OF(datasize)
#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32)
#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32)

namespace JSC {

ALWAYS_INLINE bool isInt7(int32_t value)
{
    return value == ((value << 25) >> 25);
}

ALWAYS_INLINE bool isInt9(int32_t value)
{
    return value == ((value << 23) >> 23);
}

ALWAYS_INLINE bool isInt11(int32_t value)
{
    return value == ((value << 21) >> 21);
}

ALWAYS_INLINE bool isUInt5(int32_t value)
{
    return !(value & ~0x1f);
}

ALWAYS_INLINE bool isUInt12(int32_t value)
{
    return !(value & ~0xfff);
}

ALWAYS_INLINE bool isUInt12(intptr_t value)
{
    return !(value & ~0xfffL);
}
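
// For example: isInt9 checks that a value survives a round trip through the
// low nine bits, i.e. that it fits in a signed 9-bit field:
//     isInt9(255)  -> true  (the maximum encodable value)
//     isInt9(256)  -> false (needs a tenth bit)
//     isInt9(-256) -> true  (the minimum encodable value)
// The PostIndex and PreIndex wrappers below assert exactly this property.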

class UInt5 {
public:
    explicit UInt5(int value)
        : m_value(value)
    {
        ASSERT(isUInt5(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class UInt12 {
public:
    explicit UInt12(int value)
        : m_value(value)
    {
        ASSERT(isUInt12(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PostIndex {
public:
    explicit PostIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PreIndex {
public:
    explicit PreIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PairPostIndex {
public:
    explicit PairPostIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt11(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PairPreIndex {
public:
    explicit PairPreIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt11(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class LogicalImmediate {
public:
    static LogicalImmediate create32(uint32_t value)
    {
        // Check for 0, -1 - these cannot be encoded.
        if (!value || !~value)
            return InvalidLogicalImmediate;

        // First look for a 32-bit pattern, then for repeating 16-bit
        // patterns, 8-bit, 4-bit, and finally 2-bit.

        unsigned hsb, lsb;
        bool inverted;
        if (findBitRange<32>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<32>(hsb, lsb, inverted);

        if ((value & 0xffff) != (value >> 16))
            return InvalidLogicalImmediate;
        value &= 0xffff;

        if (findBitRange<16>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<16>(hsb, lsb, inverted);

        if ((value & 0xff) != (value >> 8))
            return InvalidLogicalImmediate;
        value &= 0xff;

        if (findBitRange<8>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<8>(hsb, lsb, inverted);

        if ((value & 0xf) != (value >> 4))
            return InvalidLogicalImmediate;
        value &= 0xf;

        if (findBitRange<4>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<4>(hsb, lsb, inverted);

        if ((value & 0x3) != (value >> 2))
            return InvalidLogicalImmediate;
        value &= 0x3;

        if (findBitRange<2>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<2>(hsb, lsb, inverted);

        return InvalidLogicalImmediate;
    }

    static LogicalImmediate create64(uint64_t value)
    {
        // Check for 0, -1 - these cannot be encoded.
        if (!value || !~value)
            return InvalidLogicalImmediate;

        // Look for a contiguous bit range.
        unsigned hsb, lsb;
        bool inverted;
        if (findBitRange<64>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<64>(hsb, lsb, inverted);

        // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
        if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
            return create32(static_cast<uint32_t>(value));
        return InvalidLogicalImmediate;
    }

    int value() const
    {
        ASSERT(isValid());
        return m_value;
    }

    bool isValid() const
    {
        return m_value != InvalidLogicalImmediate;
    }

    bool is64bit() const
    {
        return m_value & (1 << 12);
    }

private:
    LogicalImmediate(int value)
        : m_value(value)
    {
    }

    // Generate a mask with bits in the range hsb..0 set, for example:
    //   hsb:63 = 0xffffffffffffffff
    //   hsb:42 = 0x000007ffffffffff
    //   hsb: 0 = 0x0000000000000001
    static uint64_t mask(unsigned hsb)
    {
        ASSERT(hsb < 64);
        return 0xffffffffffffffffull >> (63 - hsb);
    }

    template<unsigned N>
    static void partialHSB(uint64_t& value, unsigned& result)
    {
        if (value & (0xffffffffffffffffull << N)) {
            result += N;
            value >>= N;
        }
    }

    // Find the bit number of the highest bit set in a non-zero value, for example:
    //   0x8080808080808080 = hsb:63
    //   0x0000000000000001 = hsb: 0
    //   0x000007ffffe00000 = hsb:42
    static unsigned highestSetBit(uint64_t value)
    {
        ASSERT(value);
        unsigned hsb = 0;
        partialHSB<32>(value, hsb);
        partialHSB<16>(value, hsb);
        partialHSB<8>(value, hsb);
        partialHSB<4>(value, hsb);
        partialHSB<2>(value, hsb);
        partialHSB<1>(value, hsb);
        return hsb;
    }

    // This function takes a value and a bit width, where value obeys the following constraints:
    //   * bits outside of the width of the value must be zero.
    //   * bits within the width of value must neither be all clear nor all set.
    // The input is inspected to detect values that consist of either two or three contiguous
    // ranges of bits. The output range hsb..lsb will describe the second range of the value.
    // if the range is set, inverted will be false, and if the range is clear, inverted will
    // be true. For example (with width 8):
    //   00001111 = hsb:3, lsb:0, inverted:false
    //   11110000 = hsb:3, lsb:0, inverted:true
    //   00111100 = hsb:5, lsb:2, inverted:false
    //   11000011 = hsb:5, lsb:2, inverted:true
    template<unsigned width>
    static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
    {
        ASSERT(value & mask(width - 1));
        ASSERT(value != mask(width - 1));
        ASSERT(!(value & ~mask(width - 1)));

        // Detect cases where the top bit is set; if so, flip all the bits & set invert.
        // This halves the number of patterns we need to look for.
        const uint64_t msb = 1ull << (width - 1);
        if ((inverted = (value & msb)))
            value ^= mask(width - 1);

        // Find the highest set bit in value, generate a corresponding mask & flip all
        // bits below it.
        hsb = highestSetBit(value);
        value ^= mask(hsb);
        if (!value) {
            // If this cleared the value, then the range hsb..0 was all set.
            lsb = 0;
            return true;
        }

        // Try making one more mask, and flipping the bits!
        lsb = highestSetBit(value);
        value ^= mask(lsb);
        if (!value) {
            // Success - but lsb actually points to the hsb of a third range - add one
            // to get to the lsb of the mid range.
            ++lsb;
            return true;
        }

        return false;
    }

    // Encodes the set of immN:immr:imms fields found in a logical immediate.
    template<unsigned width>
    static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
    {
        // Check width is a power of 2!
        ASSERT(!(width & (width - 1)));
        ASSERT(width <= 64 && width >= 2);

        int immN = 0;
        int immr = 0;
        int imms = 0;

        // For 64-bit values this is easy - just set immN to true, and imms just
        // contains the bit number of the highest set bit of the set range. For
        // values with narrower widths, these are encoded by a leading set of
        // one bits, followed by a zero bit, followed by the remaining set of bits
        // being the high bit of the range. For a 32-bit immediate there are no
        // leading one bits, just a zero followed by a five bit number. For a
        // 16-bit immediate there is one one bit, a zero bit, and then a four bit
        // bit-position, etc.
        if (width == 64)
            immN = 1;
        else
            imms = 63 & ~(width + width - 1);

        if (inverted) {
            // if width is 64 & hsb is 62, then we have a value something like:
            //   0x80000000ffffffff (in this case with lsb 32).
            // The ror should be by 1, imms (effectively set width minus 1) is
            // 32. Set width is full width minus cleared width.
            immr = (width - 1) - hsb;
            imms |= (width - ((hsb - lsb) + 1)) - 1;
        } else {
            // if width is 64 & hsb is 62, then we have a value something like:
            //   0x7fffffff00000000 (in this case with lsb 32).
            // The value is effectively rol'ed by lsb, which is equivalent to
            // a ror by width - lsb (or 0, in the case where lsb is 0). imms
            // is the set width minus 1.
            immr = (width - lsb) & (width - 1);
            imms |= ((hsb - lsb) + 1) - 1;
        }

        return immN << 12 | immr << 6 | imms;
    }

    static const int InvalidLogicalImmediate = -1;

    int m_value;
};
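
// Worked example: LogicalImmediate::create32(0x00ff00ff) finds no single
// 32-bit range (the value contains three bit ranges), but the high and low
// halfwords are equal, so the search narrows to 16 bits. There 0x00ff is the
// set range hsb:7..lsb:0, not inverted, and encodeLogicalImmediate<16>
// produces N=0, immr=000000, imms=100111 - the ARM64 bitmask immediate for
// "8 ones repeated every 16 bits".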

inline uint16_t getHalfword(uint64_t value, int which)
{
    return value >> (which << 4);
}
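
// For example, getHalfword(0x0123456789abcdef, 0) is 0xcdef and
// getHalfword(0x0123456789abcdef, 3) is 0x0123; halfword 0 is the least
// significant. This is the natural split for materializing a 64-bit constant
// as one movz plus up to three movk instructions.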

namespace ARM64Registers {
enum RegisterID {
    // Parameter/result registers
    x0,
    x1,
    x2,
    x3,
    x4,
    x5,
    x6,
    x7,
    // Indirect result location register
    x8,
    // Temporary registers
    x9,
    x10,
    x11,
    x12,
    x13,
    x14,
    x15,
    // Intra-procedure-call scratch registers (temporary)
    ip0,
    ip1,
    // Platform Register (temporary)
    x18,
    // Callee-saved
    x19,
    x20,
    x21,
    x22,
    x23,
    x24,
    x25,
    x26,
    x27,
    x28,
    // Special
    fp,
    lr,
    sp,
    zr = 0x3f,
};

enum FPRegisterID {
    // Parameter/result registers
    q0,
    q1,
    q2,
    q3,
    q4,
    q5,
    q6,
    q7,
    // Callee-saved (up to 64-bits only!)
    q8,
    q9,
    q10,
    q11,
    q12,
    q13,
    q14,
    q15,
    // Temporary registers
    q16,
    q17,
    q18,
    q19,
    q20,
    q21,
    q22,
    q23,
    q24,
    q25,
    q26,
    q27,
    q28,
    q29,
    q30,
    q31,
};

static bool isSp(RegisterID reg) { return reg == sp; }
static bool isZr(RegisterID reg) { return reg == zr; }

} // namespace ARM64Registers

class ARM64Assembler {
public:
    typedef ARM64Registers::RegisterID RegisterID;
    typedef ARM64Registers::FPRegisterID FPRegisterID;

    static RegisterID firstRegister() { return ARM64Registers::x0; }
    static RegisterID lastRegister() { return ARM64Registers::sp; }

    static FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
    static FPRegisterID lastFPRegister() { return ARM64Registers::q31; }

private:
    static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
    static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }

public:
    ARM64Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }

    AssemblerBuffer& buffer() { return m_buffer; }

    // (HS, LO, HI, LS) -> (AE, B, A, BE)
    // (VS, VC) -> (O, NO)
    typedef enum {
        ConditionEQ,
        ConditionNE,
        ConditionHS, ConditionCS = ConditionHS,
        ConditionLO, ConditionCC = ConditionLO,
        ConditionMI,
        ConditionPL,
        ConditionVS,
        ConditionVC,
        ConditionHI,
        ConditionLS,
        ConditionGE,
        ConditionLT,
        ConditionGT,
        ConditionLE,
        ConditionAL,
        ConditionInvalid
    } Condition;

    static Condition invert(Condition cond)
    {
        return static_cast<Condition>(cond ^ 1);
    }
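
    // The AArch64 condition codes pair up so that flipping the low bit
    // negates the condition, which is all invert() needs to do: for example
    // ConditionEQ (0b0000) ^ 1 is ConditionNE (0b0001), and
    // ConditionGE (0b1010) ^ 1 is ConditionLT (0b1011).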

    enum ShiftType {
        LSL,
        LSR,
        ASR,
        ROR
    };

    enum ExtendType {
        UXTB,
        UXTH,
        UXTW,
        UXTX,
        SXTB,
        SXTH,
        SXTW,
        SXTX
    };

    enum SetFlags {
        DontSetFlags,
        S
    };

#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
        JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
        JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
        JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
    };
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
        LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
    };
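
    // JUMP_ENUM_WITH_SIZE packs the record kind into the low four bits and
    // the worst-case patch size in bytes above them; for example
    // JUMP_ENUM_SIZE(JumpCondition) is 8, because an out-of-range conditional
    // jump may need two 32-bit instructions (an inverted conditional branch
    // over an unconditional one).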

    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
        }
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
            data.realTypes.m_is64Bit = is64Bit;
            data.realTypes.m_compareRegister = compareRegister;
        }
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
            data.realTypes.m_bitNumber = bitNumber;
            data.realTypes.m_compareRegister = compareRegister;
        }
        void operator=(const LinkRecord& other)
        {
            data.copyTypes.content[0] = other.data.copyTypes.content[0];
            data.copyTypes.content[1] = other.data.copyTypes.content[1];
            data.copyTypes.content[2] = other.data.copyTypes.content[2];
        }
        intptr_t from() const { return data.realTypes.m_from; }
        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
        intptr_t to() const { return data.realTypes.m_to; }
        JumpType type() const { return data.realTypes.m_type; }
        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
        Condition condition() const { return data.realTypes.m_condition; }
        bool is64Bit() const { return data.realTypes.m_is64Bit; }
        unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
        RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }

    private:
        union {
            struct RealTypes {
                intptr_t m_from : 48;
                intptr_t m_to : 48;
                JumpType m_type : 8;
                JumpLinkType m_linkType : 8;
                Condition m_condition : 4;
                unsigned m_bitNumber : 6;
                RegisterID m_compareRegister : 6;
                bool m_is64Bit : 1;
            } realTypes;
            struct CopyTypes {
                uint64_t content[3];
            } copyTypes;
            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
        } data;
    };

    // bits(N) VFPExpandImm(bits(8) imm8);
    //
    // Encoding of floating point immediates is a little complicated. Here's a
    // high level description:
    //     +/-m*2-n where m and n are integers, 16 <= m <= 31, 0 <= n <= 7
    // and the algorithm for expanding to a single precision float:
    //     return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
    //
    // The trickiest bit is how the exponent is handled. The following table
    // may help clarify things a little:
    //     654
    //     100 01111100 124 -3 1020 01111111100
    //     101 01111101 125 -2 1021 01111111101
    //     110 01111110 126 -1 1022 01111111110
    //     111 01111111 127  0 1023 01111111111
    //     000 10000000 128  1 1024 10000000000
    //     001 10000001 129  2 1025 10000000001
    //     010 10000010 130  3 1026 10000000010
    //     011 10000011 131  4 1027 10000000011
    // The first column shows the bit pattern stored in bits 6-4 of the arm
    // encoded immediate. The second column shows the 8-bit IEEE 754 single
    // -precision exponent in binary, the third column shows the raw decimal
    // value. IEEE 754 single-precision numbers are stored with a bias of 127
    // to the exponent, so the fourth column shows the resulting exponent.
    // From this we can see that the exponent can be in the range -3..4,
    // which agrees with the high level description given above. The fifth
    // and sixth columns show the value stored in a IEEE 754 double-precision
    // number to represent these exponents in decimal and binary, given the
    // bias of 1023.
    //
    // Ultimately, detecting doubles that can be encoded as immediates on arm
    // and encoding doubles is actually not too bad. A floating point value can
    // be encoded by retaining the sign bit, the low three bits of the exponent
    // and the high 4 bits of the mantissa. To validly be able to encode an
    // immediate the remainder of the mantissa must be zero, and the high part
    // of the exponent must match the top bit retained, bar the highest bit
    // which must be its inverse.
    static bool canEncodeFPImm(double d)
    {
        // Discard the sign bit, the low two bits of the exponent & the highest
        // four bits of the mantissa.
        uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
        return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
    }
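
    // As a sanity check: bitwise_cast<uint64_t>(1.0) is 0x3ff0000000000000,
    // which masks to 0x3fc0000000000000, so 1.0 is encodable and
    // encodeFPImm(1.0) yields imm8 = 0x70; 2.0 is 0x4000000000000000, the
    // other accepted pattern, and yields imm8 = 0x00.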

    template<int datasize>
    static bool canEncodePImmOffset(int32_t offset)
    {
        int32_t maxPImm = 4095 * (datasize / 8);
        if (offset < 0)
            return false;
        if (offset > maxPImm)
            return false;
        if (offset & ((datasize / 8) - 1))
            return false;
        return true;
    }

    static bool canEncodeSImmOffset(int32_t offset)
    {
        return isInt9(offset);
    }

private:
    int encodeFPImm(double d)
    {
        ASSERT(canEncodeFPImm(d));
        uint64_t u64 = bitwise_cast<uint64_t>(d);
        return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
    }

    template<int datasize>
    int encodeShiftAmount(int amount)
    {
        ASSERT(!amount || datasize == (8 << amount));
        return amount;
    }

    template<int datasize>
    static int encodePositiveImmediate(unsigned pimm)
    {
        ASSERT(!(pimm & ((datasize / 8) - 1)));
        return pimm / (datasize / 8);
    }
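
    // For example, encodePositiveImmediate<64>(32) yields 4: the unsigned
    // 12-bit offset field of a 64-bit load or store is scaled by the transfer
    // size, so "ldr x0, [x1, #32]" encodes 32 / 8 = 4 in imm12.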

    enum Datasize {
        Datasize_32,
        Datasize_64,
        Datasize_64_top,
        Datasize_16
    };

    enum MemOpSize {
        MemOpSize_8_or_128,
        MemOpSize_16,
        MemOpSize_32,
        MemOpSize_64,
    };

    enum BranchType {
        BranchType_JMP,
        BranchType_CALL,
        BranchType_RET
    };

    enum AddOp {
        AddOp_ADD,
        AddOp_SUB,
    };

    enum BitfieldOp {
        BitfieldOp_SBFM,
        BitfieldOp_BFM,
        BitfieldOp_UBFM
    };

    enum DataOp1Source {
        DataOp_RBIT,
        DataOp_REV16,
        DataOp_REV32,
        DataOp_REV64,
        DataOp_CLZ,
        DataOp_CLS
    };

    enum DataOp2Source {
        DataOp_UDIV = 2,
        DataOp_SDIV = 3,
        DataOp_LSLV = 8,
        DataOp_LSRV = 9,
        DataOp_ASRV = 10,
        DataOp_RORV = 11
    };

    enum DataOp3Source {
        DataOp_MADD = 0,
        DataOp_MSUB = 1,
        DataOp_SMADDL = 2,
        DataOp_SMSUBL = 3,
        DataOp_SMULH = 4,
        DataOp_UMADDL = 10,
        DataOp_UMSUBL = 11,
        DataOp_UMULH = 12
    };

    enum LogicalOp {
        LogicalOp_AND,
        LogicalOp_ORR,
        LogicalOp_EOR,
        LogicalOp_ANDS
    };

    enum ExcepnOp {
        ExcepnOp_EXCEPTION = 0,
        ExcepnOp_BREAKPOINT = 1,
        ExcepnOp_HALT = 2,
        ExcepnOp_DCPS = 5
    };

    enum FPCmpOp {
        FPCmpOp_FCMP = 0x00,
        FPCmpOp_FCMP0 = 0x08,
        FPCmpOp_FCMPE = 0x10,
        FPCmpOp_FCMPE0 = 0x18
    };

    enum FPCondCmpOp {
        FPCondCmpOp_FCMP,
        FPCondCmpOp_FCMPE
    };

    enum FPDataOp1Source {
        FPDataOp_FMOV = 0,
        FPDataOp_FABS = 1,
        FPDataOp_FNEG = 2,
        FPDataOp_FSQRT = 3,
        FPDataOp_FCVT_toSingle = 4,
        FPDataOp_FCVT_toDouble = 5,
        FPDataOp_FCVT_toHalf = 7,
        FPDataOp_FRINTN = 8,
        FPDataOp_FRINTP = 9,
        FPDataOp_FRINTM = 10,
        FPDataOp_FRINTZ = 11,
        FPDataOp_FRINTA = 12,
        FPDataOp_FRINTX = 14,
        FPDataOp_FRINTI = 15
    };

    enum FPDataOp2Source {
        FPDataOp_FMUL,
        FPDataOp_FDIV,
        FPDataOp_FADD,
        FPDataOp_FSUB,
        FPDataOp_FMAX,
        FPDataOp_FMIN,
        FPDataOp_FMAXNM,
        FPDataOp_FMINNM,
        FPDataOp_FNMUL
    };

    enum FPIntConvOp {
        FPIntConvOp_FCVTNS = 0x00,
        FPIntConvOp_FCVTNU = 0x01,
        FPIntConvOp_SCVTF = 0x02,
        FPIntConvOp_UCVTF = 0x03,
        FPIntConvOp_FCVTAS = 0x04,
        FPIntConvOp_FCVTAU = 0x05,
        FPIntConvOp_FMOV_QtoX = 0x06,
        FPIntConvOp_FMOV_XtoQ = 0x07,
        FPIntConvOp_FCVTPS = 0x08,
        FPIntConvOp_FCVTPU = 0x09,
        FPIntConvOp_FMOV_QtoX_top = 0x0e,
        FPIntConvOp_FMOV_XtoQ_top = 0x0f,
        FPIntConvOp_FCVTMS = 0x10,
        FPIntConvOp_FCVTMU = 0x11,
        FPIntConvOp_FCVTZS = 0x18,
        FPIntConvOp_FCVTZU = 0x19
    };

    enum MemOp {
        MemOp_STORE,
        MemOp_LOAD,
        MemOp_STORE_V128,
        MemOp_LOAD_V128,
        MemOp_PREFETCH = 2, // size must be 3
        MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
        MemOp_LOAD_signed32 = 3 // size may be 0 or 1
    };

    enum MemPairOpSize {
        MemPairOp_32 = 0,
        MemPairOp_LoadSigned_32 = 1,
        MemPairOp_64 = 2,

        MemPairOp_V32 = MemPairOp_32,
        MemPairOp_V64 = 1,
        MemPairOp_V128 = 2
    };

    enum MoveWideOp {
        MoveWideOp_N = 0,
        MoveWideOp_Z = 2,
        MoveWideOp_K = 3
    };

    enum LdrLiteralOp {
        LdrLiteralOp_32BIT = 0,
        LdrLiteralOp_64BIT = 1,
        LdrLiteralOp_LDRSW = 2,
        LdrLiteralOp_128BIT = 2
    };

    static unsigned memPairOffsetShift(bool V, MemPairOpSize size)
    {
        // return the log2 of the size in bytes, e.g. 64 bit size returns 3
        if (V)
            return size + 2;
        return (size >> 1) + 2;
    }

public:
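
    // For example, a 64-bit integer pair (MemPairOp_64 == 2, V == false)
    // yields (2 >> 1) + 2 == 3, so the 7-bit pair offset is scaled by
    // 2^3 = 8 bytes; a 128-bit vector pair (V == true) yields 2 + 2 == 4,
    // a 16-byte scale.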

    // Integer Instructions:

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        add<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        if (isSp(rd) || isSp(rn)) {
            ASSERT(shift == LSL);
            add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
        } else
            insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
    }

    ALWAYS_INLINE void adr(RegisterID rd, int offset)
    {
        insn(pcRelative(false, offset, rd));
    }

    ALWAYS_INLINE void adrp(RegisterID rd, int offset)
    {
        ASSERT(!(offset & 0xfff));
        insn(pcRelative(true, offset >> 12, rd));
        nopCortexA53Fix843419();
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        and_<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        sbfm<datasize>(rd, rn, shift, datasize - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        asrv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
    }

    ALWAYS_INLINE void b(int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        ASSERT(offset == (offset << 6) >> 6);
        insn(unconditionalBranchImmediate(false, offset));
    }

    ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        ASSERT(offset == (offset << 13) >> 13);
        insn(conditionalBranchImmediate(offset, cond));
    }

    template<int datasize>
    ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        bfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        bic<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd));
    }

    ALWAYS_INLINE void bl(int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(unconditionalBranchImmediate(true, offset));
    }

    ALWAYS_INLINE void blr(RegisterID rn)
    {
        insn(unconditionalBranchRegister(BranchType_CALL, rn));
    }

    ALWAYS_INLINE void br(RegisterID rn)
    {
        insn(unconditionalBranchRegister(BranchType_JMP, rn));
    }

    ALWAYS_INLINE void brk(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0));
    }

    template<int datasize>
    ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(compareAndBranchImmediate(DATASIZE, true, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(compareAndBranchImmediate(DATASIZE, false, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond)
    {
        csinc<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond)
    {
        csinv<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        add<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond)
    {
        csneg<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void cset(RegisterID rd, Condition cond)
    {
        csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csetm(RegisterID rd, Condition cond)
    {
        csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eon<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eor<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb)
    {
        CHECK_DATASIZE();
        insn(extract(DATASIZE, rm, lsb, rn, rd));
    }

    ALWAYS_INLINE void hint(int imm)
    {
        insn(hintPseudo(imm));
    }

    ALWAYS_INLINE void hlt(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
    }

    template<int datasize>
    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldr<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of ldrb, since if amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrh(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        // Not calling the 5 argument form of ldrsb, since if amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsh<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsw(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 2);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0)
    {
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift);
    }

    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lslv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, shift, datasize - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lsrv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd));
    }
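
    // The immediate shifts above are aliases of the bitfield-move
    // instructions; for example lsl<64>(rd, rn, 3) becomes
    // ubfm<64>(rd, rn, 61, 60), i.e. "rotate right by 61, keep bits 60..0",
    // which is how the architecture defines LSL #3.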

    template<int datasize>
    ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        nopCortexA53Fix835769<datasize>();
        insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        msub<datasize>(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        if (isSp(rd) || isSp(rm))
            add<datasize>(rd, rm, UInt12(0));
        else
            orr<datasize>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm)
    {
        orr<datasize>(rd, ARM64Registers::zr, imm);
    }

    template<int datasize>
    ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        nopCortexA53Fix835769<datasize>();
        insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        madd<datasize>(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    ALWAYS_INLINE void nop()
    {
        insn(nopPseudo());
    }

    static void fillNops(void* base, size_t size)
    {
        RELEASE_ASSERT(!(size % sizeof(int32_t)));
        size_t n = size / sizeof(int32_t);
        for (int32_t* ptr = static_cast<int32_t*>(base); n--;)
            *ptr++ = nopPseudo();
    }

    ALWAYS_INLINE void dmbSY()
    {
        insn(0xd5033fbf);
    }
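
    // fillNops works in whole instructions: fillNops(base, 12) writes three
    // NOP encodings. dmbSY() emits the raw word 0xd5033fbf, "dmb sy", the
    // full-system memory barrier.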

    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orn<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orr<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd));
    }

    ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr)
    {
        insn(unconditionalBranchRegister(BranchType_RET, rn));
    }

    template<int datasize>
    ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width.
            insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd));
        else
            insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn)
    {
        ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands.
        insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        rorv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift)
    {
        extr<datasize>(rd, rs, rs, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd));
    }

    ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        nopCortexA53Fix835769<64>();
        insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smsubl(rd, rn, rm, ARM64Registers::zr);
    }

    ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        nopCortexA53Fix835769<64>();
        insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd));
    }

    ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smaddl(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        str<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of strb, since if amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        strh(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd, it uses XZR for the register 31. SUBS with extended register supports SP for Xd, but only if SetFlags is not used, otherwise register 31 is Xd.");
        ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand.");

        if (isSp(rd) || isSp(rn))
            sub<datasize, setFlags>(rd, rn, rm, UXTX, 0);
        else
            sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm));
        insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 7);
    }

    template<int datasize>
    ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 15);
    }

    ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
    {
        sbfm<64>(rd, rn, 0, 31);
    }

    ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(false, imm, offset, rt));
    }

    ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(true, imm, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, imm);
    }

    template<int datasize>
    ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
    }

    ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        nopCortexA53Fix835769<64>();
        insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umsubl(rd, rn, rm, ARM64Registers::zr);
    }

    ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        nopCortexA53Fix835769<64>();
        insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
    }

    ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umaddl(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 7);
    }

    template<int datasize>
    ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 15);
    }

    ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
    {
        ubfm<64>(rd, rn, 0, 31);
    }

    // Floating Point Instructions:

    template<int datasize>
    ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0));
    }

    template<int datasize>
    ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
    }
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn)
    {
        ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64);
        ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64);
        ASSERT(dstsize != srcsize);
        Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16;
        FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf;
        insn(floatingPointDataProcessing1Source(type, opcode, vn, vd));
    }
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
    }
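    // The letter before S/U in the FCVT family above is the rounding mode:
    // A = nearest, ties away from zero; M = toward minus infinity; N = nearest,
    // ties to even; P = toward plus infinity; Z = toward zero. For example,
    // fcvtzs<32, 64>(rd, vn) is the truncating double-to-int32 conversion.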
    template<int datasize>
    ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
    {
        CHECK_DATASIZE();
        insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
    }
    ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
    {
        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
    }

    ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
    {
        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
    }
    template<int datasize>
    ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
    }
    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldr<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
    {
        CHECK_FP_MEMOP_DATASIZE();
        ASSERT(datasize >= 32);
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
    }
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
    }
    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
    {
        str<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm)
    {
        CHECK_FP_MEMOP_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
    }
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_buffer.label();
    }

    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_buffer.label();
        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.m_offset;
        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
        return result;
    }

    AssemblerLabel label()
    {
        AssemblerLabel result = m_buffer.label();
        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
            nop();
            result = m_buffer.label();
        }
        return result;
    }
    AssemblerLabel align(int alignment)
    {
        ASSERT(!(alignment & 3));
        while (!m_buffer.isAligned(alignment))
            brk(0);
        return label();
    }

    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }

    void* unlinkedCode() { return m_buffer.data(); }
    size_t codeSize() const { return m_buffer.codeSize(); }

    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
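
    // A sketch of the typical lifetime (illustrative, not exhaustive): while code
    // is generated, branches are recorded with the member linkJump() below and
    // resolved in bulk via jumpsToLink()/link() at finalization; once the
    // executable copy exists, only the 'relink'/'repatch' entry points should be
    // used, since they also cacheFlush() the words they rewrite.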
    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
    }

    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
    }

    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
    }

    void linkJump(AssemblerLabel from, AssemblerLabel to)
    {
        ASSERT(from.isSet());
        ASSERT(to.isSet());
        relinkJumpOrCall<false>(addressOf(from), addressOf(to));
    }
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());
        relinkJumpOrCall<false>(addressOf(code, from), to);
    }

    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());
        linkJumpOrCall<true>(addressOf(code, from) - 1, to);
    }

    static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
    {
        linkPointer(addressOf(code, where), valuePtr);
    }
    static void replaceWithJump(void* where, void* to)
    {
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);
        *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset));
        cacheFlush(where, sizeof(int));
    }
    static ptrdiff_t maxJumpReplacementSize()
    {
        return 4;
    }
    static void replaceWithLoad(void* where)
    {
        Datasize sf;
        AddOp op;
        SetFlags S;
        int shift;
        int imm12;
        RegisterID rn;
        RegisterID rd;
        if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
            ASSERT(sf == Datasize_64);
            ASSERT(op == AddOp_ADD);
            ASSERT(!S);
            ASSERT(!shift);
            ASSERT(!(imm12 & ~0xff8));
            *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
            cacheFlush(where, sizeof(int));
        }
#if !ASSERT_DISABLED
        else {
            MemOpSize size;
            bool V;
            MemOp opc;
            int imm12;
            RegisterID rn;
            RegisterID rt;
            ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
            ASSERT(size == MemOpSize_64);
            ASSERT(!V);
            ASSERT(opc == MemOp_LOAD);
            ASSERT(!(imm12 & ~0x1ff));
        }
#endif
    }
    static void replaceWithAddressComputation(void* where)
    {
        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
            ASSERT(size == MemOpSize_64);
            ASSERT(!V);
            ASSERT(opc == MemOp_LOAD);
            ASSERT(!(imm12 & ~0x1ff));
            *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
            cacheFlush(where, sizeof(int));
        }
#if !ASSERT_DISABLED
        else {
            Datasize sf;
            AddOp op;
            SetFlags S;
            int shift;
            int imm12;
            RegisterID rn;
            RegisterID rd;
            ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
            ASSERT(sf == Datasize_64);
            ASSERT(op == AddOp_ADD);
            ASSERT(!S);
            ASSERT(!shift);
            ASSERT(!(imm12 & ~0xff8));
        }
#endif
    }
    static void repatchPointer(void* where, void* valuePtr)
    {
        linkPointer(static_cast<int*>(where), valuePtr, true);
    }
    static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
    {
        uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
        address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
        address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);

        if (flush)
            cacheFlush(address, sizeof(int) * 3);
    }
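    // Worked example (illustrative): for valuePtr == 0x000012345678abcd and
    // rd == x0, the three words written are
    //     movz x0, #0xabcd              // getHalfword(value, 0)
    //     movk x0, #0x5678, lsl #16     // getHalfword(value, 1)
    //     movk x0, #0x1234, lsl #32     // getHalfword(value, 2)
    // i.e. a 48-bit address is materialized in exactly three instructions, which
    // is why readPointer()/linkPointer() below assume a fixed three-word sequence.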
    static void repatchInt32(void* where, int32_t value)
    {
        int* address = static_cast<int*>(where);

        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;

        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
        ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));

        if (value >= 0) {
            address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        } else {
            address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        }

        cacheFlush(where, sizeof(int) * 2);
    }
    static void* readPointer(void* where)
    {
        int* address = static_cast<int*>(where);

        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rdFirst, rd;

        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
        uintptr_t result = imm16;

        expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
        result |= static_cast<uintptr_t>(imm16) << 16;

        expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
        result |= static_cast<uintptr_t>(imm16) << 32;

        return reinterpret_cast<void*>(result);
    }
    static void* readCallTarget(void* from)
    {
        return readPointer(reinterpret_cast<int*>(from) - 4);
    }

    static void relinkJump(void* from, void* to)
    {
        relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
        cacheFlush(from, sizeof(int));
    }

    static void relinkCall(void* from, void* to)
    {
        relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to);
        cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
    }
    static void repatchCompact(void* where, int32_t value)
    {
        ASSERT(!(value & ~0x3ff8));

        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
        ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.

        if (size == MemOpSize_32)
            imm12 = encodePositiveImmediate<32>(value);
        else
            imm12 = encodePositiveImmediate<64>(value);
        *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);

        cacheFlush(where, sizeof(int));
    }
    unsigned debugOffset() { return m_buffer.debugOffset(); }
#if OS(LINUX) && COMPILER(GCC)
    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
    {
        __builtin___clear_cache(reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end));
    }
#endif

    static void cacheFlush(void* code, size_t size)
    {
#if OS(IOS)
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#elif OS(LINUX)
        size_t page = pageSize();
        uintptr_t current = reinterpret_cast<uintptr_t>(code);
        uintptr_t end = current + size;
        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;

        if (end <= firstPageEnd) {
            linuxPageFlush(current, end);
            return;
        }

        linuxPageFlush(current, firstPageEnd);

        for (current = firstPageEnd; current + page < end; current += page)
            linuxPageFlush(current, current + page);

        linuxPageFlush(current, end);
#else
#error "The cacheFlush support is missing on this platform."
#endif
    }
    // Assembler admin methods:

    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }

    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }

    static bool canCompact(JumpType jumpType)
    {
        // Fixed jumps cannot be compacted.
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
    }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        switch (jumpType) {
        case JumpFixed:
            return LinkInvalid;
        case JumpNoConditionFixedSize:
            return LinkJumpNoCondition;
        case JumpConditionFixedSize:
            return LinkJumpCondition;
        case JumpCompareAndBranchFixedSize:
            return LinkJumpCompareAndBranch;
        case JumpTestBitFixedSize:
            return LinkJumpTestBit;
        case JumpNoCondition:
            return LinkJumpNoCondition;
        case JumpCondition: {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            if (((relative << 43) >> 43) == relative)
                return LinkJumpConditionDirect;

            return LinkJumpCondition;
        }
        case JumpCompareAndBranch: {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            if (((relative << 43) >> 43) == relative)
                return LinkJumpCompareAndBranchDirect;

            return LinkJumpCompareAndBranch;
        }
        case JumpTestBit: {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            if (((relative << 50) >> 50) == relative)
                return LinkJumpTestBitDirect;

            return LinkJumpTestBit;
        }
        default:
            ASSERT_NOT_REACHED();
        }

        return LinkJumpNoCondition;
    }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
    static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpNoCondition:
            linkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpConditionDirect:
            linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpCondition:
            linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to);
            break;
        case LinkJumpCompareAndBranchDirect:
            linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpCompareAndBranch:
            linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
            break;
        case LinkJumpTestBitDirect:
            linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpTestBit:
            linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
            break;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }
private:
    template<Datasize size>
    static bool checkMovk(int insn, int _hw, RegisterID _rd)
    {
        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);

        return expected
            && sf == size
            && opc == MoveWideOp_K
            && hw == _hw
            && rd == _rd;
    }
    static void linkPointer(int* address, void* valuePtr, bool flush = false)
    {
        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
        ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
        ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));

        setPointer(address, valuePtr, rd, flush);
    }
    template<bool isCall>
    static void linkJumpOrCall(int* from, void* to)
    {
        bool link;
        int imm26;
        bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);

        ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
        ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);

        *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
    }
    template<bool isDirect>
    static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
            linkJumpOrCall<false>(from + 1, to);
        }
    }
    template<bool isDirect>
    static void linkConditionalBranch(Condition condition, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = conditionalBranchImmediate(static_cast<int>(offset), condition);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = conditionalBranchImmediate(2, invert(condition));
            linkJumpOrCall<false>(from + 1, to);
        }
    }
    template<bool isDirect>
    static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
            linkJumpOrCall<false>(from + 1, to);
        }
    }
    template<bool isCall>
    static void relinkJumpOrCall(int* from, void* to)
    {
        if (!isCall && disassembleNop(from)) {
            unsigned op01;
            int imm19;
            Condition condition;
            bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);

            if (isConditionalBranchImmediate) {
                ASSERT_UNUSED(op01, !op01);
                ASSERT_UNUSED(isCall, !isCall);

                if (imm19 == 8)
                    condition = invert(condition);

                linkConditionalBranch<false>(condition, from - 1, to);
                return;
            }

            Datasize opSize;
            bool op;
            RegisterID rt;
            bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);

            if (isCompareAndBranchImmediate) {
                if (imm19 == 8)
                    op = !op;

                linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to);
                return;
            }

            int imm14;
            unsigned bitNumber;
            bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);

            if (isTestAndBranchImmediate) {
                if (imm14 == 8)
                    op = !op;

                linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to);
                return;
            }
        }

        linkJumpOrCall<isCall>(from, to);
    }
    static int* addressOf(void* code, AssemblerLabel label)
    {
        return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
    }

    int* addressOf(AssemblerLabel label)
    {
        return addressOf(m_buffer.data(), label);
    }
    static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
    static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
    static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
    static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
    {
        int insn = *static_cast<int*>(address);
        sf = static_cast<Datasize>((insn >> 31) & 1);
        op = static_cast<AddOp>((insn >> 30) & 1);
        S = static_cast<SetFlags>((insn >> 29) & 1);
        shift = (insn >> 22) & 3;
        imm12 = (insn >> 10) & 0xfff; // imm12 occupies bits [21:10]; a 10-bit mask would truncate it.
        rn = disassembleXOrSp((insn >> 5) & 0x1f);
        rd = disassembleXOrZrOrSp(S, insn & 0x1f);
        return (insn & 0x1f000000) == 0x11000000;
    }
    static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
    {
        int insn = *static_cast<int*>(address);
        size = static_cast<MemOpSize>((insn >> 30) & 3);
        V = (insn >> 26) & 1;
        opc = static_cast<MemOp>((insn >> 22) & 3);
        imm12 = (insn >> 10) & 0xfff;
        rn = disassembleXOrSp((insn >> 5) & 0x1f);
        rt = disassembleXOrZr(insn & 0x1f);
        return (insn & 0x3b000000) == 0x39000000;
    }
    static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
    {
        int insn = *static_cast<int*>(address);
        sf = static_cast<Datasize>((insn >> 31) & 1);
        opc = static_cast<MoveWideOp>((insn >> 29) & 3);
        hw = (insn >> 21) & 3;
        imm16 = insn >> 5;
        rd = disassembleXOrZr(insn & 0x1f);
        return (insn & 0x1f800000) == 0x12800000;
    }

    static bool disassembleNop(void* address)
    {
        unsigned insn = *static_cast<unsigned*>(address);
        return insn == 0xd503201f;
    }
    static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
    {
        int insn = *static_cast<int*>(address);
        sf = static_cast<Datasize>((insn >> 31) & 1);
        op = (insn >> 24) & 0x1;
        imm19 = (insn << 8) >> 13;
        rt = static_cast<RegisterID>(insn & 0x1f);
        return (insn & 0x7e000000) == 0x34000000;
    }

    static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition& condition)
    {
        int insn = *static_cast<int*>(address);
        op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
        imm19 = (insn << 8) >> 13;
        condition = static_cast<Condition>(insn & 0xf);
        return (insn & 0xfe000000) == 0x54000000;
    }

    static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
    {
        int insn = *static_cast<int*>(address);
        op = (insn >> 24) & 0x1;
        imm14 = (insn << 13) >> 18;
        bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
        rt = static_cast<RegisterID>(insn & 0x1f);
        return (insn & 0x7e000000) == 0x36000000;
    }

    static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
    {
        int insn = *static_cast<int*>(address);
        op = (insn >> 31) & 1;
        imm26 = (insn << 6) >> 6;
        return (insn & 0x7c000000) == 0x14000000;
    }
    static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; }
    static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; }
    static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
    static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }

    ALWAYS_INLINE void insn(int instruction)
    {
        m_buffer.putInt(instruction);
    }
    ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
    {
        ASSERT(imm3 < 5);
        // The only allocated values for opt is 0.
        const int opt = 0;
        return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
    }

    ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
    {
        ASSERT(shift < 2);
        ASSERT(isUInt12(imm12));
        return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
    }
    ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
    {
        ASSERT(shift < 3);
        ASSERT(!(imm6 & (sf ? ~63 : ~31)));
        return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
    {
        const int opcode2 = 0;
        return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
    {
        ASSERT(immr < (sf ? 64 : 32));
        ASSERT(imms < (sf ? 64 : 32));
        const int N = sf;
        return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    // 'op' means negate
    ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
    {
        ASSERT(imm19 == (imm19 << 13) >> 13);
        return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
    }
    ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
    {
        ASSERT(imm19 == (imm19 << 13) >> 13);
        ASSERT(!(cond & ~15));
        // The only allocated values for o1 & o0 are 0.
        const int o1 = 0;
        const int o0 = 0;
        return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
    }

    ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
    {
        ASSERT(!(imm5 & ~0x1f));
        ASSERT(nzcv < 16);
        const int S = 1;
        const int o2 = 0;
        const int o3 = 0;
        return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
    }
    ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
    {
        ASSERT(nzcv < 16);
        const int S = 1;
        const int o2 = 0;
        const int o3 = 0;
        return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
    }

    // 'op' means negate
    // 'op2' means increment
    ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        const int opcode2 = 0;
        return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
    {
        int op54 = opcode >> 4;
        int op31 = (opcode >> 1) & 7;
        int op0 = opcode & 1;
        return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
    {
        ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
        const int op2 = 0;
        return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
    }

    ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
    {
        ASSERT(imms < (sf ? 64 : 32));
        const int op21 = 0;
        const int N = sf;
        const int o0 = 0;
        return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2)
    {
        const int M = 0;
        const int S = 0;
        const int op = 0;
        return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2);
    }

    ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv)
    {
        ASSERT(!(nzcv & ~0xf));
        const int M = 0;
        const int S = 0;
        return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv);
    }

    ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd);
    }

    ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        const int imm5 = 0;
        return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
    }
    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
    {
        const int S = 0;
        return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
    }

    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
    {
        return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
    }

    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
    {
        return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
    }
    ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
    }

    ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
    }

    // 'o1' means negate
    ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
    {
        const int M = 0;
        const int S = 0;
        return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd);
    }
    ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, FPRegisterID rt)
    {
        ASSERT(((imm19 << 13) >> 13) == imm19);
        return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt);
    }

    ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt)
    {
        return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt));
    }
    ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(isInt9(imm9));
        return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
    {
        return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
    }

    ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
    {
        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
        unsigned immedShiftAmount = memPairOffsetShift(V, size);
        int imm7 = immediate >> immedShiftAmount;
        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
        return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
    {
        return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
    }
    ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(isInt9(imm9));
        return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
    {
        return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
    }

    ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
    {
        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
        unsigned immedShiftAmount = memPairOffsetShift(V, size);
        int imm7 = immediate >> immedShiftAmount;
        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
        return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
    {
        return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
    }
    // 'S' means shift rm
    ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit.
        return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt)
    {
        return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt));
    }
    ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(isInt9(imm9));
        return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
    {
        ASSERT(isInt9(imm9));
        return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
    }

    ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt)
    {
        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
        ASSERT(isUInt12(imm12));
        return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt);
    }

    ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt)
    {
        return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt));
    }
    ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd)
    {
        ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff)));
        return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd));
    }

    // 'N' means negate rm
    ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
    {
        ASSERT(!(imm6 & (sf ? ~63 : ~31)));
        return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
    ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd)
    {
        ASSERT(hw < (sf ? 4 : 2));
        return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd));
    }

    // 'op' means link
    ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26)
    {
        ASSERT(imm26 == (imm26 << 6) >> 6);
        return (0x14000000 | op << 31 | (imm26 & 0x3ffffff));
    }
    // 'op' means page
    ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd)
    {
        ASSERT(imm21 == (imm21 << 11) >> 11);
        int32_t immlo = imm21 & 3;
        int32_t immhi = (imm21 >> 2) & 0x7ffff;
        return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt)
    {
        return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt));
    }

    ALWAYS_INLINE static int hintPseudo(int imm)
    {
        ASSERT(!(imm & ~0x7f));
        return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr);
    }

    ALWAYS_INLINE static int nopPseudo()
    {
        return hintPseudo(0);
    }
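    // Worked example: hintPseudo(0) is 'hint #0', i.e. nop. Expanding
    // system(0, 0, 3, 2, 0, 0, zr) gives 0xd5000000 | 3 << 16 | 2 << 12 | 0x1f
    // == 0xd503201f, the constant disassembleNop() matches against.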
    // 'op' means negate
    ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt)
    {
        ASSERT(!(b50 & ~0x3f));
        ASSERT(imm14 == (imm14 << 18) >> 18);
        int b5 = b50 >> 5;
        int b40 = b50 & 0x1f;
        return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt));
    }
    ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn)
    {
        // The only allocated values for op2 is 0x1f, for op3 & op4 are 0.
        const int op2 = 0x1f;
        const int op3 = 0;
        const int op4 = 0;
        return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
    }
    // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the
    // last instruction in the buffer is a load, store or prefetch. Needed
    // before 64-bit multiply-accumulate instructions.
    template<int datasize>
    ALWAYS_INLINE void nopCortexA53Fix835769()
    {
#if CPU(ARM64_CORTEXA53)
        CHECK_DATASIZE();
        if (datasize == 64) {
            if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) {
                // From ARMv8 Reference Manual, Section C4.1: the encoding of the
                // instructions in the Loads and stores instruction group is:
                // ---- 1-0- ---- ---- ---- ---- ---- ----
                if (UNLIKELY((*reinterpret_cast_ptr<int32_t*>(reinterpret_cast_ptr<char*>(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000))
                    insn(nopPseudo());
            }
        }
#endif
    }
    // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid
    // wrong address access after ADRP instruction.
    ALWAYS_INLINE void nopCortexA53Fix843419()
    {
#if CPU(ARM64_CORTEXA53)
        nopPseudo();
        nopPseudo();
        nopPseudo();
#endif
    }
    AssemblerBuffer m_buffer;
    Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
    int m_indexOfLastWatchpoint;
    int m_indexOfTailOfLastWatchpoint;
};

} // namespace JSC

#undef CHECK_DATASIZE_OF
#undef DATASIZE_OF
#undef MEMOPSIZE_OF
#undef CHECK_DATASIZE
#undef DATASIZE
#undef MEMOPSIZE
#undef CHECK_FP_MEMOP_DATASIZE

#endif // ENABLE(ASSEMBLER) && CPU(ARM64)

#endif // ARM64Assembler_h