/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ARMAssembler_h
#define ARMAssembler_h

#if ENABLE(ASSEMBLER) && CPU(ARM64)

#include "AssemblerBuffer.h"
#include <wtf/Assertions.h>
#include <wtf/Vector.h>

#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64)
#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
#define DATASIZE DATASIZE_OF(datasize)
#define MEMOPSIZE MEMOPSIZE_OF(datasize)
#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)

ALWAYS_INLINE bool isInt9(int32_t value)
{
    return value == ((value << 23) >> 23);
}

ALWAYS_INLINE bool isUInt5(int32_t value)
{
    return !(value & ~0x1f);
}

ALWAYS_INLINE bool isUInt12(int32_t value)
{
    return !(value & ~0xfff);
}

ALWAYS_INLINE bool isUInt12(intptr_t value)
{
    return !(value & ~0xfffL);
}

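// Illustrative examples (not part of the original header): isInt9 checks
// that a value survives a round trip through a 9-bit sign extension, i.e.
// that it lies in [-256, 255]:
//     isInt9(255);  // true
//     isInt9(256);  // false - needs a 10th bit
//     isInt9(-256); // true
//     isInt9(-257); // false
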
class UInt5 {
public:
    explicit UInt5(int value)
        : m_value(value)
    {
        ASSERT(isUInt5(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class UInt12 {
public:
    explicit UInt12(int value)
        : m_value(value)
    {
        ASSERT(isUInt12(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PostIndex {
public:
    explicit PostIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PreIndex {
public:
    explicit PreIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class LogicalImmediate {
public:
    static LogicalImmediate create32(uint32_t value)
    {
        // Check for 0, -1 - these cannot be encoded.
        if (!value || !~value)
            return InvalidLogicalImmediate;

        // First look for a 32-bit pattern, then for repeating 16-bit
        // patterns, 8-bit, 4-bit, and finally 2-bit.

        unsigned hsb, lsb;
        bool inverted;
        if (findBitRange<32>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<32>(hsb, lsb, inverted);

        if ((value & 0xffff) != (value >> 16))
            return InvalidLogicalImmediate;
        value &= 0xffff;

        if (findBitRange<16>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<16>(hsb, lsb, inverted);

        if ((value & 0xff) != (value >> 8))
            return InvalidLogicalImmediate;
        value &= 0xff;

        if (findBitRange<8>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<8>(hsb, lsb, inverted);

        if ((value & 0xf) != (value >> 4))
            return InvalidLogicalImmediate;
        value &= 0xf;

        if (findBitRange<4>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<4>(hsb, lsb, inverted);

        if ((value & 0x3) != (value >> 2))
            return InvalidLogicalImmediate;
        value &= 0x3;

        if (findBitRange<2>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<2>(hsb, lsb, inverted);

        return InvalidLogicalImmediate;
    }

    static LogicalImmediate create64(uint64_t value)
    {
        // Check for 0, -1 - these cannot be encoded.
        if (!value || !~value)
            return InvalidLogicalImmediate;

        // Look for a contiguous bit range.
        unsigned hsb, lsb;
        bool inverted;
        if (findBitRange<64>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<64>(hsb, lsb, inverted);

        // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
        if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
            return create32(static_cast<uint32_t>(value));
        return InvalidLogicalImmediate;
    }

    int value() const
    {
        ASSERT(isValid());
        return m_value;
    }

    bool isValid() const
    {
        return m_value != InvalidLogicalImmediate;
    }

    bool is64bit() const
    {
        return m_value & (1 << 12);
    }

private:
    LogicalImmediate(int value)
        : m_value(value)
    {
    }

    // Generate a mask with bits in the range hsb..0 set, for example:
    //   hsb:63 = 0xffffffffffffffff
    //   hsb:42 = 0x000007ffffffffff
    //   hsb: 0 = 0x0000000000000001
    static uint64_t mask(unsigned hsb)
    {
        ASSERT(hsb < 64);
        return 0xffffffffffffffffull >> (63 - hsb);
    }

    template<unsigned N>
    static void partialHSB(uint64_t& value, unsigned& result)
    {
        if (value & (0xffffffffffffffffull << N)) {
            result += N;
            value >>= N;
        }
    }

    // Find the bit number of the highest bit set in a non-zero value, for example:
    //   0x8080808080808080 = hsb:63
    //   0x0000000000000001 = hsb: 0
    //   0x000007ffffe00000 = hsb:42
    static unsigned highestSetBit(uint64_t value)
    {
        ASSERT(value);
        unsigned hsb = 0;
        partialHSB<32>(value, hsb);
        partialHSB<16>(value, hsb);
        partialHSB<8>(value, hsb);
        partialHSB<4>(value, hsb);
        partialHSB<2>(value, hsb);
        partialHSB<1>(value, hsb);
        return hsb;
    }

    // This function takes a value and a bit width, where value obeys the following constraints:
    //   * bits outside of the width of the value must be zero.
    //   * bits within the width of the value must be neither all clear nor all set.
    // The input is inspected to detect values that consist of either two or three contiguous
    // ranges of bits. The output range hsb..lsb will describe the second range of the value.
    // If the range is set, inverted will be false, and if the range is clear, inverted will
    // be true. For example (with width 8):
    //     00001111 = hsb:3, lsb:0, inverted:false
    //     11110000 = hsb:3, lsb:0, inverted:true
    //     00111100 = hsb:5, lsb:2, inverted:false
    //     11000011 = hsb:5, lsb:2, inverted:true
    template<unsigned width>
    static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
    {
        ASSERT(value & mask(width - 1));
        ASSERT(value != mask(width - 1));
        ASSERT(!(value & ~mask(width - 1)));

        // Detect cases where the top bit is set; if so, flip all the bits & set invert.
        // This halves the number of patterns we need to look for.
        const uint64_t msb = 1ull << (width - 1);
        if ((inverted = (value & msb)))
            value ^= mask(width - 1);

        // Find the highest set bit in value, generate a corresponding mask & flip all
        // bits under it.
        hsb = highestSetBit(value);
        value ^= mask(hsb);

        // If this cleared the value, then the range hsb..0 was all set.
        if (!value) {
            lsb = 0;
            return true;
        }

        // Try making one more mask, and flipping the bits!
        lsb = highestSetBit(value);
        value ^= mask(lsb);
        if (!value) {
            // Success - but lsb actually points to the hsb of a third range - add one
            // to get to the lsb of the mid range.
            ++lsb;
            return true;
        }

        return false;
    }

    // Encodes the set of immN:immr:imms fields found in a logical immediate.
    template<unsigned width>
    static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
    {
        // Check width is a power of 2!
        ASSERT(!(width & (width - 1)));
        ASSERT(width <= 64 && width >= 2);

        int immN = 0;
        int imms = 0;
        int immr = 0;

        // For 64-bit values this is easy - just set immN to true, and imms just
        // contains the bit number of the highest set bit of the set range. For
        // values with narrower widths, these are encoded by a leading set of
        // one bits, followed by a zero bit, followed by the remaining set of bits
        // being the high bit of the range. For a 32-bit immediate there are no
        // leading one bits, just a zero followed by a five bit number. For a
        // 16-bit immediate there is one one bit, a zero bit, and then a four bit
        // bit-position, etc.
        if (width == 64)
            immN = 1;
        else
            imms = 63 & ~(width + width - 1);

        if (inverted) {
            // if width is 64 & hsb is 62, then we have a value something like:
            //     0x80000000ffffffff (in this case with lsb 32).
            // The ror should be by 1, imms (effectively set width minus 1) is
            // 32. Set width is full width minus cleared width.
            immr = (width - 1) - hsb;
            imms |= (width - ((hsb - lsb) + 1)) - 1;
        } else {
            // if width is 64 & hsb is 62, then we have a value something like:
            //     0x7fffffff00000000 (in this case with lsb 32).
            // The value is effectively rol'ed by lsb, which is equivalent to
            // a ror by width - lsb (or 0, in the case where lsb is 0). imms
            // is the width of the set range, minus one.
            immr = (width - lsb) & (width - 1);
            imms |= hsb - lsb;
        }

        return immN << 12 | immr << 6 | imms;
    }

    static const int InvalidLogicalImmediate = -1;

    int m_value;
};

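// Usage sketch (illustrative, not part of the original header). AArch64
// logical immediates are repeating patterns built from one contiguous bit
// range, so a repeating mask encodes while an arbitrary constant does not:
//     LogicalImmediate mask = LogicalImmediate::create32(0x00ff00ff); // valid: repeating 16-bit pattern
//     LogicalImmediate junk = LogicalImmediate::create32(0x12345678); // !junk.isValid()
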
inline uint16_t getHalfword(uint64_t value, int which)
{
    return value >> (which << 4);
}

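// For illustration: halfword 0 is the least significant, so
//     getHalfword(0x1122334455667788ull, 0) == 0x7788
//     getHalfword(0x1122334455667788ull, 3) == 0x1122
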
namespace ARM64Registers {
typedef enum {
    // Parameter/result registers
    x0, x1, x2, x3, x4, x5, x6, x7,
    // Indirect result location register
    x8,
    // Temporary registers
    x9, x10, x11, x12, x13, x14, x15,
    // Intra-procedure-call scratch registers (temporary)
    ip0, ip1,
    // Platform Register (temporary)
    x18,
    // Callee-saved
    x19, x20, x21, x22, x23, x24, x25, x26, x27, x28,
    // Special
    fp, lr, sp,
    zr = 0x3f,
} RegisterID;

typedef enum {
    // Parameter/result registers
    q0, q1, q2, q3, q4, q5, q6, q7,
    // Callee-saved (up to 64-bits only!)
    q8, q9, q10, q11, q12, q13, q14, q15,
    // Temporary registers
    q16, q17, q18, q19, q20, q21, q22, q23,
    q24, q25, q26, q27, q28, q29, q30, q31,
} FPRegisterID;

static bool isSp(RegisterID reg) { return reg == sp; }
static bool isZr(RegisterID reg) { return reg == zr; }
} // namespace ARM64Registers

class ARM64Assembler {
public:
    typedef ARM64Registers::RegisterID RegisterID;
    typedef ARM64Registers::FPRegisterID FPRegisterID;

private:
    static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
    static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }

public:
    ARM64Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }

    // For reference, the x86 analogues of the unsigned and overflow conditions:
    //   (HS, LO, HI, LS) -> (AE, B, A, BE)
    //   (VS, VC) -> (O, NO)
    typedef enum {
        ConditionEQ,
        ConditionNE,
        ConditionHS, ConditionCS = ConditionHS,
        ConditionLO, ConditionCC = ConditionLO,
        ConditionMI,
        ConditionPL,
        ConditionVS,
        ConditionVC,
        ConditionHI,
        ConditionLS,
        ConditionGE,
        ConditionLT,
        ConditionGT,
        ConditionLE,
        ConditionAL,
        ConditionInvalid
    } Condition;

    static Condition invert(Condition cond)
    {
        return static_cast<Condition>(cond ^ 1);
    }

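    // For illustration: AArch64 condition codes come in even/odd pairs in
    // which the low bit negates the test, so for example
    //     invert(ConditionEQ) == ConditionNE
    //     invert(ConditionHI) == ConditionLS
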
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
        JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
        JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
        JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
    };
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
        LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
    };

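    // For illustration: each enumerator packs the maximum code size of the
    // jump into its high bits, so JUMP_ENUM_SIZE(JumpCondition) == 8 (two
    // 4-byte instructions), while the low four bits keep enumerators unique.
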
    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
        }

        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
            data.realTypes.m_is64Bit = is64Bit;
            data.realTypes.m_compareRegister = compareRegister;
        }

        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
            data.realTypes.m_bitNumber = bitNumber;
            data.realTypes.m_compareRegister = compareRegister;
        }

        void operator=(const LinkRecord& other)
        {
            data.copyTypes.content[0] = other.data.copyTypes.content[0];
            data.copyTypes.content[1] = other.data.copyTypes.content[1];
            data.copyTypes.content[2] = other.data.copyTypes.content[2];
        }

        intptr_t from() const { return data.realTypes.m_from; }
        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
        intptr_t to() const { return data.realTypes.m_to; }
        JumpType type() const { return data.realTypes.m_type; }
        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
        Condition condition() const { return data.realTypes.m_condition; }
        bool is64Bit() const { return data.realTypes.m_is64Bit; }
        unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
        RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }

    private:
        union {
            struct RealTypes {
                intptr_t m_from : 48;
                intptr_t m_to : 48;
                JumpType m_type : 8;
                JumpLinkType m_linkType : 8;
                Condition m_condition : 4;
                bool m_is64Bit : 1;
                unsigned m_bitNumber : 6;
                RegisterID m_compareRegister : 5;
            } realTypes;
            struct CopyTypes {
                uint64_t content[3];
            } copyTypes;
            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
        } data;
    };

    // bits(N) VFPExpandImm(bits(8) imm8);
    //
    // Encoding of floating point immediates is a little complicated. Here's a
    // high level description:
    //     +/-m * 2^-n, where m and n are integers, 16 <= m <= 31, 0 <= n <= 7
    // and the algorithm for expanding to a single precision float:
    //     return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
    //
    // The trickiest bit is how the exponent is handled. The following table
    // may help clarify things a little:
    //     100 01111100 124 -3 1020 01111111100
    //     101 01111101 125 -2 1021 01111111101
    //     110 01111110 126 -1 1022 01111111110
    //     111 01111111 127  0 1023 01111111111
    //     000 10000000 128  1 1024 10000000000
    //     001 10000001 129  2 1025 10000000001
    //     010 10000010 130  3 1026 10000000010
    //     011 10000011 131  4 1027 10000000011
    // The first column shows the bit pattern stored in bits 6-4 of the arm
    // encoded immediate. The second column shows the 8-bit IEEE 754 single
    // -precision exponent in binary, the third column shows the raw decimal
    // value. IEEE 754 single-precision numbers are stored with a bias of 127
    // to the exponent, so the fourth column shows the resulting exponent.
    // From this we can see that the exponent can be in the range -3..4,
    // which agrees with the high level description given above. The fifth
    // and sixth columns show the value stored in an IEEE 754 double-precision
    // number to represent these exponents in decimal and binary, given the
    // double-precision exponent bias of 1023.
    //
    // Ultimately, detecting doubles that can be encoded as immediates on arm
    // and encoding doubles is actually not too bad. A floating point value can
    // be encoded by retaining the sign bit, the low three bits of the exponent
    // and the high 4 bits of the mantissa. To validly be able to encode an
    // immediate the remainder of the mantissa must be zero, and the high part
    // of the exponent must match the top bit retained, bar the highest bit
    // which must be its inverse.
    static bool canEncodeFPImm(double d)
    {
        // Discard the sign bit, the low two bits of the exponent & the highest
        // four bits of the mantissa.
        uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
        return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
    }

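    // For illustration (values not from the original header): 1.0
    // (0x3ff0000000000000) masks to 0x3fc0000000000000 and -7.25
    // (0xc01d000000000000) masks to 0x4000000000000000, so both encode;
    // 0.1 has a non-zero mantissa tail, so it does not.
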
    template<int datasize>
    static bool canEncodePImmOffset(int32_t offset)
    {
        int32_t maxPImm = 4095 * (datasize / 8);
        if (offset < 0)
            return false;
        if (offset > maxPImm)
            return false;
        if (offset & ((datasize / 8) - 1))
            return false;
        return true;
    }

    static bool canEncodeSImmOffset(int32_t offset)
    {
        return isInt9(offset);
    }

    int encodeFPImm(double d)
    {
        ASSERT(canEncodeFPImm(d));
        uint64_t u64 = bitwise_cast<uint64_t>(d);
        return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
    }

    template<int datasize>
    int encodeShiftAmount(int amount)
    {
        ASSERT(!amount || datasize == (8 << amount));
        return amount;
    }

    template<int datasize>
    static int encodePositiveImmediate(unsigned pimm)
    {
        ASSERT(!(pimm & ((datasize / 8) - 1)));
        return pimm / (datasize / 8);
    }

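    // For illustration: pimm is a byte offset scaled down by the access
    // size, so encodePositiveImmediate<64>(256) == 32, i.e. a 64-bit load
    // at byte offset 256 is encoded with an imm12 field of 32.
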
    enum ExcepnOp {
        ExcepnOp_EXCEPTION = 0,
        ExcepnOp_BREAKPOINT = 1,
        ExcepnOp_HALT = 2,
    };

    enum FPCmpOp {
        FPCmpOp_FCMP = 0x00,
        FPCmpOp_FCMP0 = 0x08,
        FPCmpOp_FCMPE = 0x10,
        FPCmpOp_FCMPE0 = 0x18
    };

    enum FPCondCmpOp {
        FPCondCmpOp_FCMP,
        FPCondCmpOp_FCMPE
    };

    enum FPDataOp1Source {
        FPDataOp_FMOV = 0,
        FPDataOp_FABS = 1,
        FPDataOp_FNEG = 2,
        FPDataOp_FSQRT = 3,
        FPDataOp_FCVT_toSingle = 4,
        FPDataOp_FCVT_toDouble = 5,
        FPDataOp_FCVT_toHalf = 7,
        FPDataOp_FRINTN = 8,
        FPDataOp_FRINTP = 9,
        FPDataOp_FRINTM = 10,
        FPDataOp_FRINTZ = 11,
        FPDataOp_FRINTA = 12,
        FPDataOp_FRINTX = 14,
        FPDataOp_FRINTI = 15,
    };

    enum FPDataOp2Source {
        FPDataOp_FMUL = 0,
        FPDataOp_FDIV = 1,
        FPDataOp_FADD = 2,
        FPDataOp_FSUB = 3,
        FPDataOp_FMAX = 4,
        FPDataOp_FMIN = 5,
        FPDataOp_FMAXNM = 6,
        FPDataOp_FMINNM = 7,
        FPDataOp_FNMUL = 8
    };

    enum FPIntConvOp {
        FPIntConvOp_FCVTNS = 0x00,
        FPIntConvOp_FCVTNU = 0x01,
        FPIntConvOp_SCVTF = 0x02,
        FPIntConvOp_UCVTF = 0x03,
        FPIntConvOp_FCVTAS = 0x04,
        FPIntConvOp_FCVTAU = 0x05,
        FPIntConvOp_FMOV_QtoX = 0x06,
        FPIntConvOp_FMOV_XtoQ = 0x07,
        FPIntConvOp_FCVTPS = 0x08,
        FPIntConvOp_FCVTPU = 0x09,
        FPIntConvOp_FMOV_QtoX_top = 0x0e,
        FPIntConvOp_FMOV_XtoQ_top = 0x0f,
        FPIntConvOp_FCVTMS = 0x10,
        FPIntConvOp_FCVTMU = 0x11,
        FPIntConvOp_FCVTZS = 0x18,
        FPIntConvOp_FCVTZU = 0x19,
    };

    enum MemOp {
        MemOp_STORE = 0,
        MemOp_LOAD = 1,
        MemOp_PREFETCH = 2, // size must be 3
        MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
        MemOp_LOAD_signed32 = 3 // size may be 0 or 1
    };

    enum LdrLiteralOp {
        LdrLiteralOp_32BIT = 0,
        LdrLiteralOp_64BIT = 1,
        LdrLiteralOp_LDRSW = 2,
        LdrLiteralOp_128BIT = 2
    };

    // Integer Instructions:

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        add<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        if (isSp(rd) || isSp(rn)) {
            ASSERT(shift == LSL);
            add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
        } else
            insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
    }

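    // Usage sketch (illustrative, not part of the original header; registers
    // are the ARM64Registers enumerators):
    //     add<64>(ARM64Registers::x0, ARM64Registers::x1, UInt12(16));    // x0 = x1 + 16
    //     add<64>(ARM64Registers::x0, ARM64Registers::x1, UInt12(1), 12); // x0 = x1 + (1 << 12)
    //     add<32, S>(ARM64Registers::x0, ARM64Registers::x1, ARM64Registers::x2, LSL, 2); // w0 = w1 + (w2 << 2), setting flags
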
    ALWAYS_INLINE void adr(RegisterID rd, int offset)
    {
        insn(pcRelative(false, offset, rd));
    }

    ALWAYS_INLINE void adrp(RegisterID rd, int offset)
    {
        ASSERT(!(offset & 0xfff));
        insn(pcRelative(true, offset >> 12, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        and_<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        sbfm<datasize>(rd, rn, shift, datasize - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        asrv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
    }

    ALWAYS_INLINE void b(int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        ASSERT(offset == (offset << 6) >> 6);
        insn(unconditionalBranchImmediate(false, offset));
    }

    ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        ASSERT(offset == (offset << 13) >> 13);
        insn(conditionalBranchImmediate(offset, cond));
    }

    template<int datasize>
    ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        bfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

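    // For illustration: the bfi/bfxil aliases reduce to bfm; e.g.
    // bfi<64>(rd, rn, 8, 16) is bfm<64>(rd, rn, 56, 15) - immr = (64 - 8) & 63
    // rotates the field into place and imms = 16 - 1 marks its top bit.
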
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        bic<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd));
    }

    ALWAYS_INLINE void bl(int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(unconditionalBranchImmediate(true, offset));
    }

    ALWAYS_INLINE void blr(RegisterID rn)
    {
        insn(unconditionalBranchRegister(BranchType_CALL, rn));
    }

    ALWAYS_INLINE void br(RegisterID rn)
    {
        insn(unconditionalBranchRegister(BranchType_JMP, rn));
    }

    ALWAYS_INLINE void brk(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0));
    }

    template<int datasize>
    ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(compareAndBranchImmediate(DATASIZE, true, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(compareAndBranchImmediate(DATASIZE, false, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond)
    {
        csinc<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond)
    {
        csinv<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        add<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

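    // For illustration: cmp and cmn are aliases that discard the result by
    // targeting the zero register, e.g. cmp<64>(x0, x1) emits
    // subs xzr, x0, x1 via sub<64, S>(ARM64Registers::zr, x0, x1).
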
    template<int datasize>
    ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond)
    {
        csneg<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void cset(RegisterID rd, Condition cond)
    {
        csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csetm(RegisterID rd, Condition cond)
    {
        csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd));
    }

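    // For illustration: cset materializes a condition as 0 or 1 by inverting
    // it through csinc, e.g. cset<32>(rd, ConditionEQ) emits
    // csinc rd, wzr, wzr, NE - rd becomes 1 exactly when EQ holds.
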
    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eon<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eor<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb)
    {
        CHECK_DATASIZE();
        insn(extract(DATASIZE, rm, lsb, rn, rd));
    }

    ALWAYS_INLINE void hint(int imm)
    {
        insn(hintPseudo(imm));
    }

    ALWAYS_INLINE void hlt(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldr<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt));
    }

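    // Addressing-form sketch (illustrative, not part of the original
    // header): the ldr overloads select an encoding by argument type:
    //     ldr<64>(rt, rn, 24u);           // scaled, unsigned imm12 offset
    //     ldr<64>(rt, rn, PostIndex(8));  // rt = [rn], then rn += 8
    //     ldr<64>(rt, rn, PreIndex(-16)); // rn -= 16, then rt = [rn]
    //     ldr_literal<64>(rt, 0);         // pc-relative literal load
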
    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of ldrb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrh(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        // Not calling the 5 argument form of ldrsb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsh<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsw(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 2);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0)
    {
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift);
    }

    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lslv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, shift, datasize - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lsrv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd));
    }

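    // For illustration: immediate shifts are bitfield-move aliases, e.g.
    // lsl<64>(rd, rn, 4) becomes ubfm<64>(rd, rn, 60, 59) and
    // lsr<64>(rd, rn, 4) becomes ubfm<64>(rd, rn, 4, 63).
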
    template<int datasize>
    ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        msub<datasize>(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        if (isSp(rd) || isSp(rm))
            add<datasize>(rd, rm, UInt12(0));
        else
            orr<datasize>(rd, ARM64Registers::zr, rm);
    }

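    // Register number 31 encodes the zero register in logical instructions
    // but the stack pointer in add-immediate, which is why mov goes through
    // "add rd, rm, #0" when either operand is sp and "orr rd, zr, rm"
    // otherwise.
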
    template<int datasize>
    ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm)
    {
        orr<datasize>(rd, ARM64Registers::zr, imm);
    }

    template<int datasize>
    ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd));
    }

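    // Usage sketch (illustrative, not part of the original header):
    // materializing an arbitrary 64-bit constant one halfword at a time,
    // using getHalfword from above:
    //     movz<64>(rd, getHalfword(value, 0));
    //     movk<64>(rd, getHalfword(value, 1), 16);
    //     movk<64>(rd, getHalfword(value, 2), 32);
    //     movk<64>(rd, getHalfword(value, 3), 48);
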
    template<int datasize>
    ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        madd<datasize>(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    ALWAYS_INLINE void nop()
    {
        insn(nopPseudo());
    }

    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orn<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orr<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd));
    }

    ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr)
    {
        insn(unconditionalBranchRegister(BranchType_RET, rn));
    }

    template<int datasize>
    ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width.
            insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd));
        else
            insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn)
    {
        ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands.
        insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        rorv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift)
    {
        extr<datasize>(rd, rs, rs, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd));
    }

    ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smsubl(rd, rn, rm, ARM64Registers::zr);
    }

    ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd));
    }

    ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smaddl(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        str<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of strb, since when the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        strh(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        if (isSp(rd) || isSp(rn)) {
            ASSERT(shift == LSL);
            sub<datasize, setFlags>(rd, rn, rm, UXTX, amount);
        } else
            insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 7);
    }

    template<int datasize>
    ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 15);
    }

    ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
    {
        sbfm<64>(rd, rn, 0, 31);
    }

    ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(false, imm, offset, rt));
    }

    ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(true, imm, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, imm);
    }

    template<int datasize>
    ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
    }

    ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umsubl(rd, rn, rm, ARM64Registers::zr);
    }

    ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
    }

    ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umaddl(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 7);
    }

    template<int datasize>
    ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 15);
    }

    ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
    {
        ubfm<64>(rd, rn, 0, 31);
    }

    // Floating Point Instructions:

    template<int datasize>
    ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0));
    }

    template<int datasize>
    ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn)
    {
        ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64);
        ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64);
        ASSERT(dstsize != srcsize);
        Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16;
        FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf;
        insn(floatingPointDataProcessing1Source(type, opcode, vn, vd));
    }

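    // For illustration: the template arguments pick the conversion, e.g.
    // fcvt<32, 64>(vd, vn) narrows double to single (FPDataOp_FCVT_toSingle
    // with a Datasize_64 source), and fcvt<64, 32>(vd, vn) widens.
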
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
}
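
// Note on the fcvt?? conversions above: the third letter names the rounding
// mode (A = ties away from zero, M = toward minus infinity, N = ties to even,
// P = toward plus infinity, Z = toward zero) and the last letter the integer
// signedness (S = signed, U = unsigned); the frint* family further below uses
// the same rounding-mode letters. Illustrative sketch, assuming an assembler
// instance 'a' - the usual C-style truncating double-to-int32_t conversion:
//     a.fcvtzs<32, 64>(ARM64Registers::x0, ARM64Registers::q0); // FCVTZS W0, D0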
template<int datasize>
ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE();
    insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
}

template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
{
    CHECK_DATASIZE();
    insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
}

ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
{
    insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
}

ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
{
    insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
}
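
// Illustrative sketch (assuming an assembler instance 'a'): the fmov
// GPR<->FP overloads above copy the raw bit pattern, so a double constant
// whose bits were first built in x0 (e.g. via movz/movk) can be landed in d0:
//     a.fmov<64>(ARM64Registers::q0, ARM64Registers::x0); // FMOV D0, X0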
template<int datasize>
ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
}
template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
{
    ldr<datasize>(rt, rn, rm, UXTX, 0);
}

template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
}

template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
}

template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}

template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}

template<int datasize>
ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
{
    CHECK_FP_MEMOP_DATASIZE();
    ASSERT(datasize >= 32);
    ASSERT(!(offset & 3));
    insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
}

template<int datasize>
ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}
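
// Addressing note: the 'pimm' overloads above take a scaled, unsigned 12-bit
// offset (a multiple of the transfer size), whereas ldur takes an unscaled
// signed 9-bit byte offset (-256..255). Illustrative sketch, assuming an
// assembler instance 'a':
//     a.ldr<64>(ARM64Registers::q0, ARM64Registers::x0, 8u);  // LDR D0, [X0, #8]
//     a.ldur<64>(ARM64Registers::q0, ARM64Registers::x0, -8); // LDUR D0, [X0, #-8]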
template<int dstsize, int srcsize>
ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
{
    str<datasize>(rt, rn, rm, UXTX, 0);
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
}

template<int datasize>
ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
}

template<int dstsize, int srcsize>
ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
}
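
// Illustrative sketch (assuming an assembler instance 'a'): scvtf/ucvtf
// convert a signed/unsigned integer register to floating point. The encoder
// receives srcsize first because the 'sf' field describes the GPR operand:
//     a.scvtf<64, 32>(ARM64Registers::q0, ARM64Registers::x1); // SCVTF D0, W1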
AssemblerLabel labelIgnoringWatchpoints()
{
    return m_buffer.label();
}

AssemblerLabel labelForWatchpoint()
{
    AssemblerLabel result = m_buffer.label();
    if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
        result = label();
    m_indexOfLastWatchpoint = result.m_offset;
    m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
    return result;
}

AssemblerLabel label()
{
    AssemblerLabel result = m_buffer.label();
    while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
        nop();
        result = m_buffer.label();
    }
    return result;
}

AssemblerLabel align(int alignment)
{
    ASSERT(!(alignment & 3));
    while (!m_buffer.isAligned(alignment))
        brk(0);
    return label();
}

static void* getRelocatedAddress(void* code, AssemblerLabel label)
{
    ASSERT(label.isSet());
    return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
}

static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
{
    return b.m_offset - a.m_offset;
}

int executableOffsetFor(int location)
{
    if (!location)
        return 0;
    return static_cast<int32_t*>(m_buffer.data())[location / sizeof(int32_t) - 1];
}

PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
{
    return m_buffer.executableCopy(vm, ownerUID, effort);
}

void* unlinkedCode() { return m_buffer.data(); }
size_t codeSize() const { return m_buffer.codeSize(); }

static unsigned getCallReturnOffset(AssemblerLabel call)
{
    ASSERT(call.isSet());
    return call.m_offset;
}
// Linking & patching:
//
// 'link' and 'patch' methods are for use on unprotected code - such as the code
// within the AssemblerBuffer, and code being patched by the patch buffer. Once
// code has been finalized it is (platform support permitting) within a non-
// writable region of memory; to modify the code in an execute-only executable
// pool the 'repatch' and 'relink' methods should be used.
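
// Typical flow (a sketch of how these methods combine, not additional API):
// jumps recorded during emission are resolved with linkJump()/linkCall()
// while the code still lives in the writable AssemblerBuffer; once it has
// been copied out and made executable, only the relink*/repatch* methods
// below may be used, as they cacheFlush() whatever they rewrite.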
void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
}

void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
}

void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
}

void linkJump(AssemblerLabel from, AssemblerLabel to)
{
    ASSERT(from.isSet());
    ASSERT(to.isSet());
    relinkJumpOrCall<false>(addressOf(from), addressOf(to));
}

static void linkJump(void* code, AssemblerLabel from, void* to)
{
    ASSERT(from.isSet());
    relinkJumpOrCall<false>(addressOf(code, from), to);
}

static void linkCall(void* code, AssemblerLabel from, void* to)
{
    ASSERT(from.isSet());
    linkJumpOrCall<true>(addressOf(code, from) - 1, to);
}

static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
{
    linkPointer(addressOf(code, where), valuePtr);
}

static void replaceWithJump(void* where, void* to)
{
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
    ASSERT(static_cast<int>(offset) == offset);
    *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset));
    cacheFlush(where, sizeof(int));
}
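
// Range note: the branch written above encodes a signed 26-bit word offset,
// so the target must lie within +/-128MB of the patched instruction
// (unconditionalBranchImmediate asserts that the offset fits when encoding).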
static ptrdiff_t maxJumpReplacementSize()
{
    return 4;
}

static void replaceWithLoad(void* where)
{
    Datasize sf;
    AddOp op;
    SetFlags S;
    int shift;
    int imm12;
    RegisterID rn;
    RegisterID rd;
    if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
        ASSERT(sf == Datasize_64);
        ASSERT(op == AddOp_ADD);
        ASSERT(!S);
        ASSERT(!shift);
        ASSERT(!(imm12 & ~0xff8));
        *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
        cacheFlush(where, sizeof(int));
    }
#if !ASSERT_DISABLED
    else {
        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
        ASSERT(size == MemOpSize_64);
        ASSERT(!V);
        ASSERT(opc == MemOp_LOAD);
        ASSERT(!(imm12 & ~0x1ff));
    }
#endif
}

static void replaceWithAddressComputation(void* where)
{
    MemOpSize size;
    bool V;
    MemOp opc;
    int imm12;
    RegisterID rn;
    RegisterID rt;
    if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
        ASSERT(size == MemOpSize_64);
        ASSERT(!V);
        ASSERT(opc == MemOp_LOAD);
        ASSERT(!(imm12 & ~0x1ff));
        *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
        cacheFlush(where, sizeof(int));
    }
#if !ASSERT_DISABLED
    else {
        Datasize sf;
        AddOp op;
        SetFlags S;
        int shift;
        int imm12;
        RegisterID rn;
        RegisterID rd;
        ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
        ASSERT(sf == Datasize_64);
        ASSERT(op == AddOp_ADD);
        ASSERT(!S);
        ASSERT(!shift);
        ASSERT(!(imm12 & ~0xff8));
    }
#endif
}

static void repatchPointer(void* where, void* valuePtr)
{
    linkPointer(static_cast<int*>(where), valuePtr, true);
}

static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
{
    uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
    address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
    address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
    address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);

    if (flush)
        cacheFlush(address, sizeof(int) * 3);
}
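
// Layout note: a pointer is materialized as the three-instruction sequence
// MOVZ rd, #bits[15:0]; MOVK rd, #bits[31:16], lsl #16; MOVK rd, #bits[47:32],
// lsl #32, covering a 48-bit virtual address; repatching rewrites all three
// halfwords in place before flushing the affected instructions.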
static void repatchInt32(void* where, int32_t value)
{
    int* address = static_cast<int*>(where);

    Datasize sf;
    MoveWideOp opc;
    int hw;
    uint16_t imm16;
    RegisterID rd;

    bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
    ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
    ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));

    if (value >= 0) {
        address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
        address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
    } else {
        address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
        address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
    }

    cacheFlush(where, sizeof(int) * 2);
}

static void* readPointer(void* where)
{
    int* address = static_cast<int*>(where);

    Datasize sf;
    MoveWideOp opc;
    int hw;
    uint16_t imm16;
    RegisterID rdFirst, rd;

    bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
    ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
    uintptr_t result = imm16;

    expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
    ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
    result |= static_cast<uintptr_t>(imm16) << 16;

    expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
    ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
    result |= static_cast<uintptr_t>(imm16) << 32;

    return reinterpret_cast<void*>(result);
}

static void* readCallTarget(void* from)
{
    return readPointer(reinterpret_cast<int*>(from) - 4);
}

static void relinkJump(void* from, void* to)
{
    relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
    cacheFlush(from, sizeof(int));
}

static void relinkCall(void* from, void* to)
{
    relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to);
    cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
}

static void repatchCompact(void* where, int32_t value)
{
    ASSERT(!(value & ~0x3ff8));

    MemOpSize size;
    bool V;
    MemOp opc;
    int imm12;
    RegisterID rn;
    RegisterID rt;
    bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
    ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.

    if (size == MemOpSize_32)
        imm12 = encodePositiveImmediate<32>(value);
    else
        imm12 = encodePositiveImmediate<64>(value);
    *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);

    cacheFlush(where, sizeof(int));
}

unsigned debugOffset() { return m_buffer.debugOffset(); }

static void cacheFlush(void* code, size_t size)
{
#if OS(IOS)
    sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#else
#error "The cacheFlush support is missing on this platform."
#endif
}

// Assembler admin methods:

int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }

static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
{
    return a.from() < b.from();
}

bool canCompact(JumpType jumpType)
{
    // Fixed jumps cannot be compacted
    return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
}
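
// Compaction note: a "direct" link form drops the trailing jump and keeps
// only the single conditional instruction; it is chosen when the displacement
// fits the branch's own immediate field (a 19-bit word offset for B.cond/CBZ,
// 14 bits for TBZ/TBNZ). computeJumpType() below applies the fit check.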
JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
{
    switch (jumpType) {
    case JumpFixed:
        return LinkInvalid;
    case JumpNoConditionFixedSize:
        return LinkJumpNoCondition;
    case JumpConditionFixedSize:
        return LinkJumpCondition;
    case JumpCompareAndBranchFixedSize:
        return LinkJumpCompareAndBranch;
    case JumpTestBitFixedSize:
        return LinkJumpTestBit;
    case JumpNoCondition:
        return LinkJumpNoCondition;
    case JumpCondition: {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
        intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

        if (((relative << 43) >> 43) == relative)
            return LinkJumpConditionDirect;

        return LinkJumpCondition;
    }
    case JumpCompareAndBranch: {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
        intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

        if (((relative << 43) >> 43) == relative)
            return LinkJumpCompareAndBranchDirect;

        return LinkJumpCompareAndBranch;
    }
    case JumpTestBit: {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
        intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

        if (((relative << 50) >> 50) == relative)
            return LinkJumpTestBitDirect;

        return LinkJumpTestBit;
    }
    default:
        ASSERT_NOT_REACHED();
    }

    return LinkJumpNoCondition;
}

JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
{
    JumpLinkType linkType = computeJumpType(record.type(), from, to);
    record.setLinkType(linkType);
    return linkType;
}

void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
{
    int32_t ptr = regionStart / sizeof(int32_t);
    const int32_t end = regionEnd / sizeof(int32_t);
    int32_t* offsets = static_cast<int32_t*>(m_buffer.data());
    while (ptr < end)
        offsets[ptr++] = offset;
}

Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
{
    std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
    return m_jumpsToLink;
}
void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
{
    switch (record.linkType()) {
    case LinkJumpNoCondition:
        linkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
        break;
    case LinkJumpConditionDirect:
        linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to);
        break;
    case LinkJumpCondition:
        linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to);
        break;
    case LinkJumpCompareAndBranchDirect:
        linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to);
        break;
    case LinkJumpCompareAndBranch:
        linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
        break;
    case LinkJumpTestBitDirect:
        linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to);
        break;
    case LinkJumpTestBit:
        linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
        break;
    default:
        ASSERT_NOT_REACHED();
        break;
    }
}
private:
template<Datasize size>
static bool checkMovk(int insn, int _hw, RegisterID _rd)
{
    Datasize sf;
    MoveWideOp opc;
    int hw;
    uint16_t imm16;
    RegisterID rd;
    bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);

    return expected
        && sf == size
        && opc == MoveWideOp_K
        && hw == _hw
        && rd == _rd;
}

static void linkPointer(int* address, void* valuePtr, bool flush = false)
{
    Datasize sf;
    MoveWideOp opc;
    int hw;
    uint16_t imm16;
    RegisterID rd;
    bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
    ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
    ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
    ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));

    setPointer(address, valuePtr, rd, flush);
}
template<bool isCall>
static void linkJumpOrCall(int* from, void* to)
{
    bool link;
    int imm26;
    bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);

    ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
    ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
    ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
    ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
    ASSERT(static_cast<int>(offset) == offset);

    *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
}

template<bool isDirect>
static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to)
{
    ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
    ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
    ASSERT(((offset << 38) >> 38) == offset);

    bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
    ASSERT(!isDirect || useDirect);

    if (useDirect || isDirect) {
        *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
        if (!isDirect)
            *(from + 1) = nopPseudo();
    } else {
        *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
        linkJumpOrCall<false>(from + 1, to);
    }
}
template<bool isDirect>
static void linkConditionalBranch(Condition condition, int* from, void* to)
{
    ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
    ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
    ASSERT(((offset << 38) >> 38) == offset);

    bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
    ASSERT(!isDirect || useDirect);

    if (useDirect || isDirect) {
        *from = conditionalBranchImmediate(static_cast<int>(offset), condition);
        if (!isDirect)
            *(from + 1) = nopPseudo();
    } else {
        *from = conditionalBranchImmediate(2, invert(condition));
        linkJumpOrCall<false>(from + 1, to);
    }
}

template<bool isDirect>
static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to)
{
    ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
    ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
    ASSERT(static_cast<int>(offset) == offset);
    ASSERT(((offset << 38) >> 38) == offset);

    bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
    ASSERT(!isDirect || useDirect);

    if (useDirect || isDirect) {
        *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
        if (!isDirect)
            *(from + 1) = nopPseudo();
    } else {
        *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
        linkJumpOrCall<false>(from + 1, to);
    }
}
template<bool isCall>
static void relinkJumpOrCall(int* from, void* to)
{
    if (!isCall && disassembleNop(from)) {
        unsigned op01;
        int imm19;
        Condition condition;
        bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);

        if (isConditionalBranchImmediate) {
            ASSERT_UNUSED(op01, !op01);
            ASSERT_UNUSED(isCall, !isCall);

            if (imm19 == 8)
                condition = invert(condition);

            linkConditionalBranch<false>(condition, from - 1, to);
            return;
        }

        Datasize opSize;
        bool op;
        RegisterID rt;
        bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);

        if (isCompareAndBranchImmediate) {
            if (imm19 == 8)
                op = !op;

            linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to);
            return;
        }

        int imm14;
        unsigned bitNumber;
        bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);

        if (isTestAndBranchImmediate) {
            if (imm14 == 8)
                op = !op;

            linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to);
            return;
        }
    }

    linkJumpOrCall<isCall>(from, to);
}
static int* addressOf(void* code, AssemblerLabel label)
{
    return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
}

int* addressOf(AssemblerLabel label)
{
    return addressOf(m_buffer.data(), label);
}

static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
{
    int insn = *static_cast<int*>(address);
    sf = static_cast<Datasize>((insn >> 31) & 1);
    op = static_cast<AddOp>((insn >> 30) & 1);
    S = static_cast<SetFlags>((insn >> 29) & 1);
    shift = (insn >> 22) & 3;
    imm12 = (insn >> 10) & 0xfff;
    rn = disassembleXOrSp((insn >> 5) & 0x1f);
    rd = disassembleXOrZrOrSp(S, insn & 0x1f);
    return (insn & 0x1f000000) == 0x11000000;
}

static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
{
    int insn = *static_cast<int*>(address);
    size = static_cast<MemOpSize>((insn >> 30) & 3);
    V = (insn >> 26) & 1;
    opc = static_cast<MemOp>((insn >> 22) & 3);
    imm12 = (insn >> 10) & 0xfff;
    rn = disassembleXOrSp((insn >> 5) & 0x1f);
    rt = disassembleXOrZr(insn & 0x1f);
    return (insn & 0x3b000000) == 0x39000000;
}

static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
{
    int insn = *static_cast<int*>(address);
    sf = static_cast<Datasize>((insn >> 31) & 1);
    opc = static_cast<MoveWideOp>((insn >> 29) & 3);
    hw = (insn >> 21) & 3;
    imm16 = insn >> 5;
    rd = disassembleXOrZr(insn & 0x1f);
    return (insn & 0x1f800000) == 0x12800000;
}
static bool disassembleNop(void* address)
{
    unsigned int insn = *static_cast<unsigned int*>(address);
    return insn == 0xd503201f;
}

static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
{
    int insn = *static_cast<int*>(address);
    sf = static_cast<Datasize>((insn >> 31) & 1);
    op = (insn >> 24) & 0x1;
    imm19 = (insn << 8) >> 13;
    rt = static_cast<RegisterID>(insn & 0x1f);
    return (insn & 0x7e000000) == 0x34000000;
}

static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition& condition)
{
    int insn = *static_cast<int*>(address);
    op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
    imm19 = (insn << 8) >> 13;
    condition = static_cast<Condition>(insn & 0xf);
    return (insn & 0xfe000000) == 0x54000000;
}

static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
{
    int insn = *static_cast<int*>(address);
    op = (insn >> 24) & 0x1;
    imm14 = (insn << 13) >> 18;
    bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
    rt = static_cast<RegisterID>(insn & 0x1f);
    return (insn & 0x7e000000) == 0x36000000;
}

static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
{
    int insn = *static_cast<int*>(address);
    op = (insn >> 31) & 1;
    imm26 = (insn << 6) >> 6;
    return (insn & 0x7c000000) == 0x14000000;
}
static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; }
static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; }
static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }

ALWAYS_INLINE void insn(int instruction)
{
    m_buffer.putInt(instruction);
}
ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
{
    ASSERT(imm3 < 5);
    // The only allocated values for opt is 0.
    const int opt = 0;
    return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
}

ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
{
    ASSERT(!shift || shift == 1);
    ASSERT(isUInt12(imm12));
    return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
}

ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
{
    ASSERT(shift < 3);
    ASSERT(!(imm6 & (sf ? ~63 : ~31)));
    return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
{
    const int opcode2 = 0;
    return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
{
    ASSERT(immr < (sf ? 64 : 32));
    ASSERT(imms < (sf ? 64 : 32));
    const int N = sf;
    return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}
// 'op' means negate
ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
{
    ASSERT(imm19 == (imm19 << 13) >> 13);
    return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
}

ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
{
    ASSERT(imm19 == (imm19 << 13) >> 13);
    ASSERT(!(cond & ~15));
    // The only allocated values for o1 & o0 are 0.
    const int o1 = 0;
    const int o0 = 0;
    return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
}

ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
{
    ASSERT(!(imm5 & ~0x1f));
    const int S = 1;
    const int o2 = 0;
    const int o3 = 0;
    return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
}

ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
{
    const int S = 1;
    const int o2 = 0;
    const int o3 = 0;
    return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
}

// 'op' means negate
// 'op2' means increment
ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
{
    const int S = 0;
    return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}
ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
{
    const int S = 0;
    const int opcode2 = 0;
    return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
{
    const int S = 0;
    return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
{
    int op54 = opcode >> 4;
    int op31 = (opcode >> 1) & 7;
    int op0 = opcode & 1;
    return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
{
    ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
    const int op2 = 0;
    return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
}

ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
{
    ASSERT(imms < (sf ? 64 : 32));
    const int op21 = 0;
    const int N = sf;
    const int o0 = 0;
    return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}
ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2)
{
    const int M = 0;
    const int S = 0;
    const int op = 0;
    return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2);
}

ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv)
{
    ASSERT(!(nzcv & ~0xf));
    const int M = 0;
    const int S = 0;
    return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv);
}

ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd);
}

ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    const int imm5 = 0;
    return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
}

ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
{
    const int S = 0;
    return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
}

ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
{
    return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
}

ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
{
    return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
}

ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
}

ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
}

// 'o1' means negate
ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd);
}
ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, FPRegisterID rt)
{
    ASSERT(((imm19 << 13) >> 13) == imm19);
    return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt);
}

ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt)
{
    return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt));
}

ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(isInt9(imm9));
    return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
}

ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
{
    return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
}

ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(isInt9(imm9));
    return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
}

ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
{
    return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
}

// 'S' means shift rm
ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit.
    return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt);
}

ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt)
{
    return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt));
}
ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(isInt9(imm9));
    return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
}

ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
{
    ASSERT(isInt9(imm9));
    return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
}

ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(isUInt12(imm12));
    return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt);
}

ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt)
{
    return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt));
}
ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd)
{
    ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff)));
    return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd));
}

// 'N' means negate rm
ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
{
    ASSERT(!(imm6 & (sf ? ~63 : ~31)));
    return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd)
{
    ASSERT(hw < (sf ? 4 : 2));
    return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26)
{
    ASSERT(imm26 == (imm26 << 6) >> 6);
    return (0x14000000 | op << 31 | (imm26 & 0x3ffffff));
}
ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd)
{
    ASSERT(imm21 == (imm21 << 11) >> 11);
    int32_t immlo = imm21 & 3;
    int32_t immhi = (imm21 >> 2) & 0x7ffff;
    return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt)
{
    return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt));
}

ALWAYS_INLINE static int hintPseudo(int imm)
{
    ASSERT(!(imm & ~0x7f));
    return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr);
}

ALWAYS_INLINE static int nopPseudo()
{
    return hintPseudo(0);
}
// 'op' means negate
ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt)
{
    ASSERT(!(b50 & ~0x3f));
    ASSERT(imm14 == (imm14 << 18) >> 18);
    int b5 = b50 >> 5;
    int b40 = b50 & 0x1f;
    return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt));
}

ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn)
{
    // The only allocated values for op2 is 0x1f, for op3 & op4 are 0.
    const int op2 = 0x1f;
    const int op3 = 0;
    const int op4 = 0;
    return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
}
AssemblerBuffer m_buffer;
Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
int m_indexOfLastWatchpoint;
int m_indexOfTailOfLastWatchpoint;
};

} // namespace JSC

#undef CHECK_DATASIZE_OF
#undef DATASIZE_OF
#undef MEMOPSIZE_OF
#undef CHECK_DATASIZE
#undef DATASIZE
#undef MEMOPSIZE
#undef CHECK_FP_MEMOP_DATASIZE

#endif // ENABLE(ASSEMBLER) && CPU(ARM64)

#endif // ARMAssembler_h