]>
Commit | Line | Data |
---|---|---|
93a37866 A |
1 | /* |
2 | * Copyright (C) 2012 Apple Inc. All rights reserved. | |
3 | * | |
4 | * Redistribution and use in source and binary forms, with or without | |
5 | * modification, are permitted provided that the following conditions | |
6 | * are met: | |
7 | * 1. Redistributions of source code must retain the above copyright | |
8 | * notice, this list of conditions and the following disclaimer. | |
9 | * 2. Redistributions in binary form must reproduce the above copyright | |
10 | * notice, this list of conditions and the following disclaimer in the | |
11 | * documentation and/or other materials provided with the distribution. | |
12 | * | |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
24 | */ | |
25 | ||
26 | #ifndef ARMAssembler_h | |
27 | #define ARMAssembler_h | |
28 | ||
29 | #if ENABLE(ASSEMBLER) && CPU(ARM64) | |
30 | ||
31 | #include "AssemblerBuffer.h" | |
32 | #include <wtf/Assertions.h> | |
33 | #include <wtf/Vector.h> | |
34 | #include <stdint.h> | |
35 | ||
36 | #define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64) | |
37 | #define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32) | |
38 | #define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64) | |
39 | #define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize) | |
40 | #define DATASIZE DATASIZE_OF(datasize) | |
41 | #define MEMOPSIZE MEMOPSIZE_OF(datasize) | |
42 | #define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128) | |
43 | ||
44 | namespace JSC { | |
45 | ||
46 | ALWAYS_INLINE bool isInt9(int32_t value) | |
47 | { | |
48 | return value == ((value << 23) >> 23); | |
49 | } | |
50 | ||
51 | ALWAYS_INLINE bool isUInt5(int32_t value) | |
52 | { | |
53 | return !(value & ~0x1f); | |
54 | } | |
55 | ||
56 | ALWAYS_INLINE bool isUInt12(int32_t value) | |
57 | { | |
58 | return !(value & ~0xfff); | |
59 | } | |
60 | ||
61 | ALWAYS_INLINE bool isUInt12(intptr_t value) | |
62 | { | |
63 | return !(value & ~0xfffL); | |
64 | } | |
65 | ||
// Wrapper for an unsigned 5-bit immediate. The explicit constructor
// asserts the range invariant once, so emitters taking a UInt5 can rely
// on it without re-checking.
class UInt5 {
public:
    explicit UInt5(int value)
        : m_value(value)
    {
        ASSERT(isUInt5(value));
    }

    // Implicitly usable wherever a plain int immediate is expected.
    operator int() { return m_value; }

private:
    int m_value;
};
79 | ||
// Wrapper for an unsigned 12-bit immediate (e.g. the imm12 field of
// add/sub-immediate forms); range is asserted at construction.
class UInt12 {
public:
    explicit UInt12(int value)
        : m_value(value)
    {
        ASSERT(isUInt12(value));
    }

    // Implicitly usable wherever a plain int immediate is expected.
    operator int() { return m_value; }

private:
    int m_value;
};
93 | ||
// Wrapper for a post-index load/store offset: a 9-bit signed byte
// offset (see isInt9), asserted at construction.
class PostIndex {
public:
    explicit PostIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    // Implicitly usable wherever a plain int offset is expected.
    operator int() { return m_value; }

private:
    int m_value;
};
107 | ||
// Wrapper for a pre-index load/store offset: a 9-bit signed byte
// offset (see isInt9), asserted at construction.
class PreIndex {
public:
    explicit PreIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    // Implicitly usable wherever a plain int offset is expected.
    operator int() { return m_value; }

private:
    int m_value;
};
121 | ||
// Encoder for ARM64 "logical immediate" operands — the immN:immr:imms
// fields consumed by AND/ORR/EOR/ANDS. An encodable value is a rotated
// contiguous run of set bits, replicated across the register at some
// power-of-two element width. create32/create64 return an object whose
// isValid() must be checked before calling value(); unencodable inputs
// yield the InvalidLogicalImmediate sentinel.
class LogicalImmediate {
public:
    // Try to encode a 32-bit immediate, searching from the widest
    // element size (32) down to the narrowest (2).
    static LogicalImmediate create32(uint32_t value)
    {
        // Check for 0, -1 - these cannot be encoded.
        if (!value || !~value)
            return InvalidLogicalImmediate;

        // First look for a 32-bit pattern, then for repeating 16-bit
        // patterns, 8-bit, 4-bit, and finally 2-bit.

        unsigned hsb, lsb;
        bool inverted;
        if (findBitRange<32>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<32>(hsb, lsb, inverted);

        // Halve the candidate width only if the two halves repeat.
        if ((value & 0xffff) != (value >> 16))
            return InvalidLogicalImmediate;
        value &= 0xffff;

        if (findBitRange<16>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<16>(hsb, lsb, inverted);

        if ((value & 0xff) != (value >> 8))
            return InvalidLogicalImmediate;
        value &= 0xff;

        if (findBitRange<8>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<8>(hsb, lsb, inverted);

        if ((value & 0xf) != (value >> 4))
            return InvalidLogicalImmediate;
        value &= 0xf;

        if (findBitRange<4>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<4>(hsb, lsb, inverted);

        if ((value & 0x3) != (value >> 2))
            return InvalidLogicalImmediate;
        value &= 0x3;

        if (findBitRange<2>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<2>(hsb, lsb, inverted);

        return InvalidLogicalImmediate;
    }

    // Try to encode a 64-bit immediate; falls back to the 32-bit search
    // when the two halves repeat.
    static LogicalImmediate create64(uint64_t value)
    {
        // Check for 0, -1 - these cannot be encoded.
        if (!value || !~value)
            return InvalidLogicalImmediate;

        // Look for a contiguous bit range.
        unsigned hsb, lsb;
        bool inverted;
        if (findBitRange<64>(value, hsb, lsb, inverted))
            return encodeLogicalImmediate<64>(hsb, lsb, inverted);

        // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
        if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
            return create32(static_cast<uint32_t>(value));
        return InvalidLogicalImmediate;
    }

    // The encoded immN:immr:imms value; only meaningful when isValid().
    int value() const
    {
        ASSERT(isValid());
        return m_value;
    }

    bool isValid() const
    {
        return m_value != InvalidLogicalImmediate;
    }

    // Bit 12 is immN, which is set only by the 64-bit-wide encoding
    // (see encodeLogicalImmediate).
    bool is64bit() const
    {
        return m_value & (1 << 12);
    }

private:
    // Private: instances are only produced by create32/create64 (and the
    // implicit conversion from InvalidLogicalImmediate).
    LogicalImmediate(int value)
        : m_value(value)
    {
    }

    // Generate a mask with bits in the range hsb..0 set, for example:
    //   hsb:63 = 0xffffffffffffffff
    //   hsb:42 = 0x000007ffffffffff
    //   hsb: 0 = 0x0000000000000001
    static uint64_t mask(unsigned hsb)
    {
        ASSERT(hsb < 64);
        return 0xffffffffffffffffull >> (63 - hsb);
    }

    // One binary-search step of highestSetBit: if any bit at or above
    // position N is set, account for N and shift the value down.
    template<unsigned N>
    static void partialHSB(uint64_t& value, unsigned& result)
    {
        if (value & (0xffffffffffffffffull << N)) {
            result += N;
            value >>= N;
        }
    }

    // Find the bit number of the highest bit set in a non-zero value, for example:
    //   0x8080808080808080 = hsb:63
    //   0x0000000000000001 = hsb: 0
    //   0x000007ffffe00000 = hsb:42
    static unsigned highestSetBit(uint64_t value)
    {
        ASSERT(value);
        unsigned hsb = 0;
        partialHSB<32>(value, hsb);
        partialHSB<16>(value, hsb);
        partialHSB<8>(value, hsb);
        partialHSB<4>(value, hsb);
        partialHSB<2>(value, hsb);
        partialHSB<1>(value, hsb);
        return hsb;
    }

    // This function takes a value and a bit width, where value obeys the following constraints:
    //   * bits outside of the width of the value must be zero.
    //   * bits within the width of value must neither be all clear or all set.
    // The input is inspected to detect values that consist of either two or three contiguous
    // ranges of bits. The output range hsb..lsb will describe the second range of the value.
    // if the range is set, inverted will be false, and if the range is clear, inverted will
    // be true. For example (with width 8):
    //   00001111 = hsb:3, lsb:0, inverted:false
    //   11110000 = hsb:3, lsb:0, inverted:true
    //   00111100 = hsb:5, lsb:2, inverted:false
    //   11000011 = hsb:5, lsb:2, inverted:true
    template<unsigned width>
    static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
    {
        ASSERT(value & mask(width - 1));
        ASSERT(value != mask(width - 1));
        ASSERT(!(value & ~mask(width - 1)));

        // Detect cases where the top bit is set; if so, flip all the bits & set invert.
        // This halves the number of patterns we need to look for.
        const uint64_t msb = 1ull << (width - 1);
        if ((inverted = (value & msb)))
            value ^= mask(width - 1);

        // Find the highest set bit in value, generate a corresponding mask & flip all
        // bits under it.
        hsb = highestSetBit(value);
        value ^= mask(hsb);
        if (!value) {
            // If this cleared the value, then the range hsb..0 was all set.
            lsb = 0;
            return true;
        }

        // Try making one more mask, and flipping the bits!
        lsb = highestSetBit(value);
        value ^= mask(lsb);
        if (!value) {
            // Success - but lsb actually points to the hsb of a third range - add one
            // to get to the lsb of the mid range.
            ++lsb;
            return true;
        }

        return false;
    }

    // Encodes the set of immN:immr:imms fields found in a logical immediate.
    template<unsigned width>
    static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
    {
        // Check width is a power of 2!
        ASSERT(!(width & (width - 1)));
        ASSERT(width <= 64 && width >= 2);
        ASSERT(hsb >= lsb);
        ASSERT(hsb < width);

        int immN = 0;
        int imms = 0;
        int immr = 0;

        // For 64-bit values this is easy - just set immN to true, and imms just
        // contains the bit number of the highest set bit of the set range. For
        // values with narrower widths, these are encoded by a leading set of
        // one bits, followed by a zero bit, followed by the remaining set of bits
        // being the high bit of the range. For a 32-bit immediate there are no
        // leading one bits, just a zero followed by a five bit number. For a
        // 16-bit immediate there is one one bit, a zero bit, and then a four bit
        // bit-position, etc.
        if (width == 64)
            immN = 1;
        else
            imms = 63 & ~(width + width - 1);

        if (inverted) {
            // if width is 64 & hsb is 62, then we have a value something like:
            //   0x80000000ffffffff (in this case with lsb 32).
            // The ror should be by 1, imms (effectively set width minus 1) is
            // 32. Set width is full width minus cleared width.
            immr = (width - 1) - hsb;
            imms |= (width - ((hsb - lsb) + 1)) - 1;
        } else {
            // if width is 64 & hsb is 62, then we have a value something like:
            //   0x7fffffff00000000 (in this case with lsb 32).
            // The value is effectively rol'ed by lsb, which is equivalent to
            // a ror by width - lsb (or 0, in the case where lsb is 0). imms
            // is hsb - lsb.
            immr = (width - lsb) & (width - 1);
            imms |= hsb - lsb;
        }

        return immN << 12 | immr << 6 | imms;
    }

    // Sentinel for "no encoding exists"; also the implicit-conversion
    // source for the early returns above.
    static const int InvalidLogicalImmediate = -1;

    int m_value;
};
343 | ||
// Extract 16-bit halfword number `which` (0 = least significant) from a
// 64-bit value; used when splitting constants for move-wide sequences.
inline uint16_t getHalfword(uint64_t value, int which)
{
    const unsigned shift = which << 4;
    return static_cast<uint16_t>(value >> shift);
}
348 | ||
// A64 register numbering. The role comments reflect the calling
// convention grouping used by this assembler.
namespace ARM64Registers {
// General-purpose registers.
typedef enum {
    // Parameter/result registers
    x0,
    x1,
    x2,
    x3,
    x4,
    x5,
    x6,
    x7,
    // Indirect result location register
    x8,
    // Temporary registers
    x9,
    x10,
    x11,
    x12,
    x13,
    x14,
    x15,
    // Intra-procedure-call scratch registers (temporary)
    x16, ip0 = x16,
    x17, ip1 = x17,
    // Platform Register (temporary)
    x18,
    // Callee-saved
    x19,
    x20,
    x21,
    x22,
    x23,
    x24,
    x25,
    x26,
    x27,
    x28,
    // Special
    x29, fp = x29,
    x30, lr = x30,
    sp,
    // The zero register is given a distinct out-of-band enumerator so
    // that it can be told apart from sp (see isSp/isZr below).
    zr = 0x3f,
} RegisterID;

// Floating-point/SIMD registers.
typedef enum {
    // Parameter/result registers
    q0,
    q1,
    q2,
    q3,
    q4,
    q5,
    q6,
    q7,
    // Callee-saved (up to 64-bits only!)
    q8,
    q9,
    q10,
    q11,
    q12,
    q13,
    q14,
    q15,
    // Temporary registers
    q16,
    q17,
    q18,
    q19,
    q20,
    q21,
    q22,
    q23,
    q24,
    q25,
    q26,
    q27,
    q28,
    q29,
    q30,
    q31,
} FPRegisterID;

// Helpers distinguishing the stack-pointer and zero-register enumerators.
static bool isSp(RegisterID reg) { return reg == sp; }
static bool isZr(RegisterID reg) { return reg == zr; }
}
434 | ||
435 | class ARM64Assembler { | |
436 | public: | |
437 | typedef ARM64Registers::RegisterID RegisterID; | |
438 | typedef ARM64Registers::FPRegisterID FPRegisterID; | |
439 | ||
440 | private: | |
// Convenience forwarders to the ARM64Registers namespace helpers.
static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }
443 | ||
444 | public: | |
// Construct an empty assembler. Both watchpoint indices start at
// INT_MIN, a sentinel below any real instruction index.
// NOTE(review): the members are declared outside this excerpt —
// presumably watchpoint bookkeeping; confirm against the full file.
ARM64Assembler()
    : m_indexOfLastWatchpoint(INT_MIN)
    , m_indexOfTailOfLastWatchpoint(INT_MIN)
{
}
450 | ||
// Condition codes in cond-field encoding order. Adjacent enumerators
// form complementary pairs differing only in the low bit, which
// invert() below relies on. x86-style aliases for the ARM names:
// (HS, LO, HI, LS) -> (AE, B, A, BE)
// (VS, VC) -> (O, NO)
typedef enum {
    ConditionEQ,
    ConditionNE,
    ConditionHS, ConditionCS = ConditionHS,
    ConditionLO, ConditionCC = ConditionLO,
    ConditionMI,
    ConditionPL,
    ConditionVS,
    ConditionVC,
    ConditionHI,
    ConditionLS,
    ConditionGE,
    ConditionLT,
    ConditionGT,
    ConditionLE,
    ConditionAL,
    ConditionInvalid
} Condition;
471 | ||
// Logically negate a condition code. Complementary conditions are
// adjacent in the encoding (EQ/NE, HS/LO, ...), so toggling the low
// bit flips the sense.
static Condition invert(Condition cond)
{
    return static_cast<Condition>(cond ^ 1);
}
476 | ||
// Shift kinds for shifted-register operand forms.
typedef enum {
    LSL,
    LSR,
    ASR,
    ROR
} ShiftType;

// Extend kinds for extended-register operand forms: unsigned/signed
// byte, halfword, word, and doubleword.
typedef enum {
    UXTB,
    UXTH,
    UXTW,
    UXTX,
    SXTB,
    SXTH,
    SXTW,
    SXTX
} ExtendType;

// Template argument selecting whether an instruction updates the NZCV
// flags (e.g. AND vs ANDS — see the setFlags uses below).
enum SetFlags {
    DontSetFlags,
    S
};
499 | ||
// Pack a jump-kind enumerator as (index, size): the low 4 bits hold a
// unique index, the upper bits the number of instruction bytes the jump
// may occupy. JUMP_ENUM_SIZE recovers the byte size.
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
// Kind of jump recorded at emission time; "FixedSize" variants always
// reserve their full worst-case size.
enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
    JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
    JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
    JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
    JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
    JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
    JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
    JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
    JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
};
// Link form chosen once the displacement is known; the "Direct"
// variants fit in a single instruction.
enum JumpLinkType {
    LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
    LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
    LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
    LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
    LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
    LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
    LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
    LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
};
522 | ||
// Record of a jump gathered during assembly, resolved/relinked later.
// The three constructors cover plain/conditional branches,
// compare-and-branch (register + operand width), and test-bit
// (register + bit number). All fields are packed into bitfields inside
// a union with a raw-word view; operator= copies the raw words so
// copying is three 64-bit stores rather than per-bitfield operations.
class LinkRecord {
public:
    LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
    {
        data.realTypes.m_from = from;
        data.realTypes.m_to = to;
        data.realTypes.m_type = type;
        data.realTypes.m_linkType = LinkInvalid;
        data.realTypes.m_condition = condition;
    }
    // Compare-and-branch form (CBZ/CBNZ-style): records operand width
    // and the register compared.
    LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
    {
        data.realTypes.m_from = from;
        data.realTypes.m_to = to;
        data.realTypes.m_type = type;
        data.realTypes.m_linkType = LinkInvalid;
        data.realTypes.m_condition = condition;
        data.realTypes.m_is64Bit = is64Bit;
        data.realTypes.m_compareRegister = compareRegister;
    }
    // Test-bit form (TBZ/TBNZ-style): records the bit number tested and
    // the register.
    LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
    {
        data.realTypes.m_from = from;
        data.realTypes.m_to = to;
        data.realTypes.m_type = type;
        data.realTypes.m_linkType = LinkInvalid;
        data.realTypes.m_condition = condition;
        data.realTypes.m_bitNumber = bitNumber;
        data.realTypes.m_compareRegister = compareRegister;
    }
    // Copy via the raw-word view of the union (see CopyTypes below).
    void operator=(const LinkRecord& other)
    {
        data.copyTypes.content[0] = other.data.copyTypes.content[0];
        data.copyTypes.content[1] = other.data.copyTypes.content[1];
        data.copyTypes.content[2] = other.data.copyTypes.content[2];
    }
    intptr_t from() const { return data.realTypes.m_from; }
    void setFrom(intptr_t from) { data.realTypes.m_from = from; }
    intptr_t to() const { return data.realTypes.m_to; }
    JumpType type() const { return data.realTypes.m_type; }
    JumpLinkType linkType() const { return data.realTypes.m_linkType; }
    // The link type may be decided only once.
    void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
    Condition condition() const { return data.realTypes.m_condition; }
    bool is64Bit() const { return data.realTypes.m_is64Bit; }
    unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
    RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }

private:
    union {
        // Addresses are stored in 48-bit bitfields to keep the whole
        // record within three 64-bit words.
        struct RealTypes {
            intptr_t m_from : 48;
            intptr_t m_to : 48;
            JumpType m_type : 8;
            JumpLinkType m_linkType : 8;
            Condition m_condition : 4;
            bool m_is64Bit : 1;
            unsigned m_bitNumber : 6;
            RegisterID m_compareRegister : 5;
        } realTypes;
        struct CopyTypes {
            uint64_t content[3];
        } copyTypes;
        // Guarantee the raw-word copy covers the bitfield struct exactly.
        COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
    } data;
};
588 | ||
589 | // bits(N) VFPExpandImm(bits(8) imm8); | |
590 | // | |
// Encoding of floating point immediates is a little complicated. Here's a
592 | // high level description: | |
593 | // +/-m*2-n where m and n are integers, 16 <= m <= 31, 0 <= n <= 7 | |
// and the algorithm for expanding to a single precision float:
595 | // return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19); | |
596 | // | |
597 | // The trickiest bit is how the exponent is handled. The following table | |
598 | // may help clarify things a little: | |
599 | // 654 | |
600 | // 100 01111100 124 -3 1020 01111111100 | |
601 | // 101 01111101 125 -2 1021 01111111101 | |
602 | // 110 01111110 126 -1 1022 01111111110 | |
603 | // 111 01111111 127 0 1023 01111111111 | |
604 | // 000 10000000 128 1 1024 10000000000 | |
605 | // 001 10000001 129 2 1025 10000000001 | |
606 | // 010 10000010 130 3 1026 10000000010 | |
607 | // 011 10000011 131 4 1027 10000000011 | |
608 | // The first column shows the bit pattern stored in bits 6-4 of the arm | |
609 | // encoded immediate. The second column shows the 8-bit IEEE 754 single | |
610 | // -precision exponent in binary, the third column shows the raw decimal | |
611 | // value. IEEE 754 single-precision numbers are stored with a bias of 127 | |
612 | // to the exponent, so the fourth column shows the resulting exponent. | |
// From this we can see that the exponent can be in the range -3..4,
614 | // which agrees with the high level description given above. The fifth | |
615 | // and sixth columns shows the value stored in a IEEE 754 double-precision | |
616 | // number to represent these exponents in decimal and binary, given the | |
617 | // bias of 1023. | |
618 | // | |
619 | // Ultimately, detecting doubles that can be encoded as immediates on arm | |
620 | // and encoding doubles is actually not too bad. A floating point value can | |
621 | // be encoded by retaining the sign bit, the low three bits of the exponent | |
622 | // and the high 4 bits of the mantissa. To validly be able to encode an | |
623 | // immediate the remainder of the mantissa must be zero, and the high part | |
624 | // of the exponent must match the top bit retained, bar the highest bit | |
625 | // which must be its inverse. | |
// Returns true if d is representable as an 8-bit ARM64 FP immediate
// (see the commentary above). The mask retains the sign, the exponent
// minus its replicated top bits, and the low mantissa bits; the two
// accepted patterns are the legal exponent shapes with the untstored
// mantissa bits all zero.
static bool canEncodeFPImm(double d)
{
    // Discard the sign bit, the low two bits of the exponent & the highest
    // four bits of the mantissa.
    uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
    return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
}
633 | ||
// Returns true if offset can be encoded as a scaled, unsigned 12-bit
// load/store immediate for an access of `datasize` bits: it must be
// non-negative, a multiple of the access size in bytes, and at most
// 4095 access-size units.
template<int datasize>
static bool canEncodePImmOffset(int32_t offset)
{
    const int32_t unitBytes = datasize / 8;
    const int32_t maxEncodable = 4095 * unitBytes;
    return offset >= 0 && offset <= maxEncodable && !(offset % unitBytes);
}
646 | ||
// Returns true if offset fits the 9-bit signed load/store immediate
// (the same range the PreIndex/PostIndex wrappers assert).
static bool canEncodeSImmOffset(int32_t offset)
{
    return isInt9(offset);
}
651 | ||
652 | private: | |
// Pack an encodable double into the 8-bit imm8 field: the sign bit is
// taken from bit 63, the remaining seven bits from bits 48-54 (low
// exponent bits plus the high mantissa bits). Caller must have checked
// canEncodeFPImm().
int encodeFPImm(double d)
{
    ASSERT(canEncodeFPImm(d));
    uint64_t u64 = bitwise_cast<uint64_t>(d);
    return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
}
659 | ||
// Validate an index-shift amount for a load/store of `datasize` bits:
// it must be zero, or satisfy datasize == 8 << amount (i.e. exactly
// log2 of the access size in bytes).
template<int datasize>
int encodeShiftAmount(int amount)
{
    ASSERT(!amount || datasize == (8 << amount));
    return amount;
}
666 | ||
// Scale a byte offset down to the scaled unsigned immediate field, in
// units of the access size; pimm must be a multiple of datasize/8.
template<int datasize>
static int encodePositiveImmediate(unsigned pimm)
{
    ASSERT(!(pimm & ((datasize / 8) - 1)));
    return pimm / (datasize / 8);
}
673 | ||
// --- Instruction-encoding field values -----------------------------------
// The enumerators below are raw field values for the instruction-building
// helpers; their numeric values match the A64 encodings and must not be
// reordered.

// Operand-size field (sf/type variants).
enum Datasize {
    Datasize_32,
    Datasize_64,
    Datasize_64_top,
    Datasize_16
};

// Load/store size field. 8-bit and 128-bit accesses share an encoding
// (disambiguated elsewhere, cf. MEMOPSIZE_OF above).
enum MemOpSize {
    MemOpSize_8_or_128,
    MemOpSize_16,
    MemOpSize_32,
    MemOpSize_64,
};

// Branch-register op field (BR/BLR/RET).
enum BranchType {
    BranchType_JMP,
    BranchType_CALL,
    BranchType_RET
};

// Add/subtract op bit.
enum AddOp {
    AddOp_ADD,
    AddOp_SUB
};

// Bitfield-instruction opc field (signed/insert/unsigned).
enum BitfieldOp {
    BitfieldOp_SBFM,
    BitfieldOp_BFM,
    BitfieldOp_UBFM
};

// Data-processing, one source register.
enum DataOp1Source {
    DataOp_RBIT,
    DataOp_REV16,
    DataOp_REV32,
    DataOp_REV64,
    DataOp_CLZ,
    DataOp_CLS
};

// Data-processing, two source registers (values are sparse in the ISA).
enum DataOp2Source {
    DataOp_UDIV = 2,
    DataOp_SDIV = 3,
    DataOp_LSLV = 8,
    DataOp_LSRV = 9,
    DataOp_ASRV = 10,
    DataOp_RORV = 11
};

// Data-processing, three source registers (multiply-accumulate family).
enum DataOp3Source {
    DataOp_MADD = 0,
    DataOp_MSUB = 1,
    DataOp_SMADDL = 2,
    DataOp_SMSUBL = 3,
    DataOp_SMULH = 4,
    DataOp_UMADDL = 10,
    DataOp_UMSUBL = 11,
    DataOp_UMULH = 12
};

// Exception-generation opc field.
enum ExcepnOp {
    ExcepnOp_EXCEPTION = 0,
    ExcepnOp_BREAKPOINT = 1,
    ExcepnOp_HALT = 2,
    ExcepnOp_DCPS = 5
};

// Floating-point compare opcode2 field.
enum FPCmpOp {
    FPCmpOp_FCMP = 0x00,
    FPCmpOp_FCMP0 = 0x08,
    FPCmpOp_FCMPE = 0x10,
    FPCmpOp_FCMPE0 = 0x18
};

// Floating-point conditional-compare op bit.
enum FPCondCmpOp {
    FPCondCmpOp_FCMP,
    FPCondCmpOp_FCMPE
};

// Floating-point data-processing, one source.
enum FPDataOp1Source {
    FPDataOp_FMOV = 0,
    FPDataOp_FABS = 1,
    FPDataOp_FNEG = 2,
    FPDataOp_FSQRT = 3,
    FPDataOp_FCVT_toSingle = 4,
    FPDataOp_FCVT_toDouble = 5,
    FPDataOp_FCVT_toHalf = 7,
    FPDataOp_FRINTN = 8,
    FPDataOp_FRINTP = 9,
    FPDataOp_FRINTM = 10,
    FPDataOp_FRINTZ = 11,
    FPDataOp_FRINTA = 12,
    FPDataOp_FRINTX = 14,
    FPDataOp_FRINTI = 15
};

// Floating-point data-processing, two sources.
enum FPDataOp2Source {
    FPDataOp_FMUL,
    FPDataOp_FDIV,
    FPDataOp_FADD,
    FPDataOp_FSUB,
    FPDataOp_FMAX,
    FPDataOp_FMIN,
    FPDataOp_FMAXNM,
    FPDataOp_FMINNM,
    FPDataOp_FNMUL
};

// Floating-point <-> integer conversion and FMOV opcode field.
enum FPIntConvOp {
    FPIntConvOp_FCVTNS = 0x00,
    FPIntConvOp_FCVTNU = 0x01,
    FPIntConvOp_SCVTF = 0x02,
    FPIntConvOp_UCVTF = 0x03,
    FPIntConvOp_FCVTAS = 0x04,
    FPIntConvOp_FCVTAU = 0x05,
    FPIntConvOp_FMOV_QtoX = 0x06,
    FPIntConvOp_FMOV_XtoQ = 0x07,
    FPIntConvOp_FCVTPS = 0x08,
    FPIntConvOp_FCVTPU = 0x09,
    FPIntConvOp_FMOV_QtoX_top = 0x0e,
    FPIntConvOp_FMOV_XtoQ_top = 0x0f,
    FPIntConvOp_FCVTMS = 0x10,
    FPIntConvOp_FCVTMU = 0x11,
    FPIntConvOp_FCVTZS = 0x18,
    FPIntConvOp_FCVTZU = 0x19,
};

// Logical-instruction opc field.
enum LogicalOp {
    LogicalOp_AND,
    LogicalOp_ORR,
    LogicalOp_EOR,
    LogicalOp_ANDS
};

// Load/store opc field; some values overlap and are disambiguated by
// the size field, as noted.
enum MemOp {
    MemOp_STORE,
    MemOp_LOAD,
    MemOp_STORE_V128,
    MemOp_LOAD_V128,
    MemOp_PREFETCH = 2, // size must be 3
    MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
    MemOp_LOAD_signed32 = 3 // size may be 0 or 1
};

// Move-wide opc field (MOVN/MOVZ/MOVK).
enum MoveWideOp {
    MoveWideOp_N = 0,
    MoveWideOp_Z = 2,
    MoveWideOp_K = 3
};

// Load-register-literal opc field; LDRSW and the 128-bit form share a
// value (disambiguated by the V bit elsewhere).
enum LdrLiteralOp {
    LdrLiteralOp_32BIT = 0,
    LdrLiteralOp_64BIT = 1,
    LdrLiteralOp_LDRSW = 2,
    LdrLiteralOp_128BIT = 2
};
830 | ||
831 | public: | |
832 | // Integer Instructions: | |
833 | ||
// ADC: add with carry, register form; setFlags = S emits ADCS.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
{
    CHECK_DATASIZE();
    insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
}

// ADD (immediate): rd = rn + (imm12 << shift); shift may only be 0 or 12.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
{
    CHECK_DATASIZE();
    ASSERT(!shift || shift == 12);
    insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
}

// ADD (register): convenience overload with no shift.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
{
    add<datasize, setFlags>(rd, rn, rm, LSL, 0);
}

// ADD (extended register): rd = rn + extend(rm) << amount.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_DATASIZE();
    insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
}

// ADD (shifted register). When rn is sp, route through the
// extended-register form with UXTX instead — presumably because the
// shifted-register encoding cannot name sp (register 31 reads as zr
// there); only LSL is permitted in that case.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
{
    CHECK_DATASIZE();
    if (isSp(rn)) {
        ASSERT(shift == LSL);
        add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
    } else
        insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
}
872 | ||
// ADR: PC-relative address, rd = pc + offset (byte offset).
ALWAYS_INLINE void adr(RegisterID rd, int offset)
{
    insn(pcRelative(false, offset, rd));
}

// ADRP: page-granular PC-relative address. offset must be 4KiB-aligned
// and is encoded as a page count (offset >> 12).
ALWAYS_INLINE void adrp(RegisterID rd, int offset)
{
    ASSERT(!(offset & 0xfff));
    insn(pcRelative(true, offset >> 12, rd));
}
883 | ||
// AND (register): convenience overload with no shift. (Trailing
// underscore because `and` is a reserved alternative token in C++.)
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
{
    and_<datasize, setFlags>(rd, rn, rm, LSL, 0);
}

// AND (shifted register); setFlags = S selects ANDS.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
{
    CHECK_DATASIZE();
    insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
}

// AND (immediate): imm must be a pre-validated LogicalImmediate.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
{
    CHECK_DATASIZE();
    insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
}
903 | ||
// ASR (immediate): arithmetic shift right, expressed as the SBFM alias
// with immr = shift, imms = datasize - 1.
template<int datasize>
ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
{
    ASSERT(shift < datasize);
    sbfm<datasize>(rd, rn, shift, datasize - 1);
}

// ASR (register): alias for ASRV.
template<int datasize>
ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
{
    asrv<datasize>(rd, rn, rm);
}

// ASRV: arithmetic shift right by a register-held amount.
template<int datasize>
ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
{
    CHECK_DATASIZE();
    insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
}
923 | ||
924 | ALWAYS_INLINE void b(int32_t offset = 0) | |
925 | { | |
926 | ASSERT(!(offset & 3)); | |
927 | offset >>= 2; | |
928 | ASSERT(offset == (offset << 6) >> 6); | |
929 | insn(unconditionalBranchImmediate(false, offset)); | |
930 | } | |
931 | ||
932 | ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0) | |
933 | { | |
934 | ASSERT(!(offset & 3)); | |
935 | offset >>= 2; | |
936 | ASSERT(offset == (offset << 13) >> 13); | |
937 | insn(conditionalBranchImmediate(offset, cond)); | |
938 | } | |
939 | ||
940 | template<int datasize> | |
941 | ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width) | |
942 | { | |
943 | bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1); | |
944 | } | |
945 | ||
946 | template<int datasize> | |
947 | ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms) | |
948 | { | |
949 | CHECK_DATASIZE(); | |
950 | insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd)); | |
951 | } | |
952 | ||
953 | template<int datasize> | |
954 | ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width) | |
955 | { | |
956 | bfm<datasize>(rd, rn, lsb, lsb + width - 1); | |
957 | } | |
958 | ||
959 | template<int datasize, SetFlags setFlags = DontSetFlags> | |
960 | ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm) | |
961 | { | |
962 | bic<datasize, setFlags>(rd, rn, rm, LSL, 0); | |
963 | } | |
964 | ||
965 | template<int datasize, SetFlags setFlags = DontSetFlags> | |
966 | ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) | |
967 | { | |
968 | CHECK_DATASIZE(); | |
969 | insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd)); | |
970 | } | |
971 | ||
972 | ALWAYS_INLINE void bl(int32_t offset = 0) | |
973 | { | |
974 | ASSERT(!(offset & 3)); | |
975 | offset >>= 2; | |
976 | insn(unconditionalBranchImmediate(true, offset)); | |
977 | } | |
978 | ||
979 | ALWAYS_INLINE void blr(RegisterID rn) | |
980 | { | |
981 | insn(unconditionalBranchRegister(BranchType_CALL, rn)); | |
982 | } | |
983 | ||
984 | ALWAYS_INLINE void br(RegisterID rn) | |
985 | { | |
986 | insn(unconditionalBranchRegister(BranchType_JMP, rn)); | |
987 | } | |
988 | ||
989 | ALWAYS_INLINE void brk(uint16_t imm) | |
990 | { | |
991 | insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0)); | |
992 | } | |
993 | ||
994 | template<int datasize> | |
995 | ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0) | |
996 | { | |
997 | CHECK_DATASIZE(); | |
998 | ASSERT(!(offset & 3)); | |
999 | offset >>= 2; | |
1000 | insn(compareAndBranchImmediate(DATASIZE, true, offset, rt)); | |
1001 | } | |
1002 | ||
1003 | template<int datasize> | |
1004 | ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0) | |
1005 | { | |
1006 | CHECK_DATASIZE(); | |
1007 | ASSERT(!(offset & 3)); | |
1008 | offset >>= 2; | |
1009 | insn(compareAndBranchImmediate(DATASIZE, false, offset, rt)); | |
1010 | } | |
1011 | ||
1012 | template<int datasize> | |
1013 | ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond) | |
1014 | { | |
1015 | CHECK_DATASIZE(); | |
1016 | insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv)); | |
1017 | } | |
1018 | ||
1019 | template<int datasize> | |
1020 | ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond) | |
1021 | { | |
1022 | CHECK_DATASIZE(); | |
1023 | insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv)); | |
1024 | } | |
1025 | ||
1026 | template<int datasize> | |
1027 | ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond) | |
1028 | { | |
1029 | CHECK_DATASIZE(); | |
1030 | insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv)); | |
1031 | } | |
1032 | ||
1033 | template<int datasize> | |
1034 | ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond) | |
1035 | { | |
1036 | CHECK_DATASIZE(); | |
1037 | insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv)); | |
1038 | } | |
1039 | ||
1040 | template<int datasize> | |
1041 | ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond) | |
1042 | { | |
1043 | csinc<datasize>(rd, rn, rn, invert(cond)); | |
1044 | } | |
1045 | ||
1046 | template<int datasize> | |
1047 | ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond) | |
1048 | { | |
1049 | csinv<datasize>(rd, rn, rn, invert(cond)); | |
1050 | } | |
1051 | ||
1052 | template<int datasize> | |
1053 | ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn) | |
1054 | { | |
1055 | CHECK_DATASIZE(); | |
1056 | insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd)); | |
1057 | } | |
1058 | ||
1059 | template<int datasize> | |
1060 | ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn) | |
1061 | { | |
1062 | CHECK_DATASIZE(); | |
1063 | insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd)); | |
1064 | } | |
1065 | ||
1066 | template<int datasize> | |
1067 | ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0) | |
1068 | { | |
1069 | add<datasize, S>(ARM64Registers::zr, rn, imm12, shift); | |
1070 | } | |
1071 | ||
1072 | template<int datasize> | |
1073 | ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm) | |
1074 | { | |
1075 | add<datasize, S>(ARM64Registers::zr, rn, rm); | |
1076 | } | |
1077 | ||
1078 | template<int datasize> | |
1079 | ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount) | |
1080 | { | |
1081 | add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount); | |
1082 | } | |
1083 | ||
1084 | template<int datasize> | |
1085 | ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount) | |
1086 | { | |
1087 | add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount); | |
1088 | } | |
1089 | ||
1090 | template<int datasize> | |
1091 | ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0) | |
1092 | { | |
1093 | sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift); | |
1094 | } | |
1095 | ||
1096 | template<int datasize> | |
1097 | ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm) | |
1098 | { | |
1099 | sub<datasize, S>(ARM64Registers::zr, rn, rm); | |
1100 | } | |
1101 | ||
1102 | template<int datasize> | |
1103 | ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount) | |
1104 | { | |
1105 | sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount); | |
1106 | } | |
1107 | ||
1108 | template<int datasize> | |
1109 | ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount) | |
1110 | { | |
1111 | sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount); | |
1112 | } | |
1113 | ||
1114 | template<int datasize> | |
1115 | ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond) | |
1116 | { | |
1117 | csneg<datasize>(rd, rn, rn, invert(cond)); | |
1118 | } | |
1119 | ||
1120 | template<int datasize> | |
1121 | ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) | |
1122 | { | |
1123 | CHECK_DATASIZE(); | |
1124 | insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd)); | |
1125 | } | |
1126 | ||
1127 | template<int datasize> | |
1128 | ALWAYS_INLINE void cset(RegisterID rd, Condition cond) | |
1129 | { | |
1130 | csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond)); | |
1131 | } | |
1132 | ||
1133 | template<int datasize> | |
1134 | ALWAYS_INLINE void csetm(RegisterID rd, Condition cond) | |
1135 | { | |
1136 | csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond)); | |
1137 | } | |
1138 | ||
1139 | template<int datasize> | |
1140 | ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) | |
1141 | { | |
1142 | CHECK_DATASIZE(); | |
1143 | insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd)); | |
1144 | } | |
1145 | ||
1146 | template<int datasize> | |
1147 | ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) | |
1148 | { | |
1149 | CHECK_DATASIZE(); | |
1150 | insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd)); | |
1151 | } | |
1152 | ||
1153 | template<int datasize> | |
1154 | ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) | |
1155 | { | |
1156 | CHECK_DATASIZE(); | |
1157 | insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd)); | |
1158 | } | |
1159 | ||
1160 | template<int datasize> | |
1161 | ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm) | |
1162 | { | |
1163 | eon<datasize>(rd, rn, rm, LSL, 0); | |
1164 | } | |
1165 | ||
1166 | template<int datasize> | |
1167 | ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) | |
1168 | { | |
1169 | CHECK_DATASIZE(); | |
1170 | insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd)); | |
1171 | } | |
1172 | ||
1173 | template<int datasize> | |
1174 | ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm) | |
1175 | { | |
1176 | eor<datasize>(rd, rn, rm, LSL, 0); | |
1177 | } | |
1178 | ||
1179 | template<int datasize> | |
1180 | ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) | |
1181 | { | |
1182 | CHECK_DATASIZE(); | |
1183 | insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd)); | |
1184 | } | |
1185 | ||
1186 | template<int datasize> | |
1187 | ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm) | |
1188 | { | |
1189 | CHECK_DATASIZE(); | |
1190 | insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd)); | |
1191 | } | |
1192 | ||
1193 | template<int datasize> | |
1194 | ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb) | |
1195 | { | |
1196 | CHECK_DATASIZE(); | |
1197 | insn(extract(DATASIZE, rm, lsb, rn, rd)); | |
1198 | } | |
1199 | ||
1200 | ALWAYS_INLINE void hint(int imm) | |
1201 | { | |
1202 | insn(hintPseudo(imm)); | |
1203 | } | |
1204 | ||
1205 | ALWAYS_INLINE void hlt(uint16_t imm) | |
1206 | { | |
1207 | insn(excepnGeneration(ExcepnOp_HALT, imm, 0)); | |
1208 | } | |
1209 | ||
1210 | template<int datasize> | |
1211 | ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm) | |
1212 | { | |
1213 | ldr<datasize>(rt, rn, rm, UXTX, 0); | |
1214 | } | |
1215 | ||
1216 | template<int datasize> | |
1217 | ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) | |
1218 | { | |
1219 | CHECK_DATASIZE(); | |
1220 | insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt)); | |
1221 | } | |
1222 | ||
1223 | template<int datasize> | |
1224 | ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm) | |
1225 | { | |
1226 | CHECK_DATASIZE(); | |
1227 | insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt)); | |
1228 | } | |
1229 | ||
1230 | template<int datasize> | |
1231 | ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm) | |
1232 | { | |
1233 | CHECK_DATASIZE(); | |
1234 | insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); | |
1235 | } | |
1236 | ||
1237 | template<int datasize> | |
1238 | ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm) | |
1239 | { | |
1240 | CHECK_DATASIZE(); | |
1241 | insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); | |
1242 | } | |
1243 | ||
1244 | template<int datasize> | |
1245 | ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0) | |
1246 | { | |
1247 | CHECK_DATASIZE(); | |
1248 | ASSERT(!(offset & 3)); | |
1249 | insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt)); | |
1250 | } | |
1251 | ||
1252 | ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm) | |
1253 | { | |
1254 | // Not calling the 5 argument form of ldrb, since is amount is ommitted S is false. | |
1255 | insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt)); | |
1256 | } | |
1257 | ||
1258 | ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) | |
1259 | { | |
1260 | ASSERT_UNUSED(amount, !amount); | |
1261 | insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt)); | |
1262 | } | |
1263 | ||
1264 | ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm) | |
1265 | { | |
1266 | insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt)); | |
1267 | } | |
1268 | ||
1269 | ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm) | |
1270 | { | |
1271 | insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); | |
1272 | } | |
1273 | ||
1274 | ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm) | |
1275 | { | |
1276 | insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); | |
1277 | } | |
1278 | ||
1279 | ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm) | |
1280 | { | |
1281 | ldrh(rt, rn, rm, UXTX, 0); | |
1282 | } | |
1283 | ||
1284 | ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) | |
1285 | { | |
1286 | ASSERT(!amount || amount == 1); | |
1287 | insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt)); | |
1288 | } | |
1289 | ||
1290 | ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm) | |
1291 | { | |
1292 | insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt)); | |
1293 | } | |
1294 | ||
1295 | ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm) | |
1296 | { | |
1297 | insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); | |
1298 | } | |
1299 | ||
1300 | ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm) | |
1301 | { | |
1302 | insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); | |
1303 | } | |
1304 | ||
1305 | template<int datasize> | |
1306 | ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm) | |
1307 | { | |
1308 | CHECK_DATASIZE(); | |
1309 | // Not calling the 5 argument form of ldrsb, since is amount is ommitted S is false. | |
1310 | insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt)); | |
1311 | } | |
1312 | ||
1313 | template<int datasize> | |
1314 | ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) | |
1315 | { | |
1316 | CHECK_DATASIZE(); | |
1317 | ASSERT_UNUSED(amount, !amount); | |
1318 | insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt)); | |
1319 | } | |
1320 | ||
1321 | template<int datasize> | |
1322 | ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm) | |
1323 | { | |
1324 | CHECK_DATASIZE(); | |
1325 | insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt)); | |
1326 | } | |
1327 | ||
1328 | template<int datasize> | |
1329 | ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm) | |
1330 | { | |
1331 | CHECK_DATASIZE(); | |
1332 | insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); | |
1333 | } | |
1334 | ||
1335 | template<int datasize> | |
1336 | ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm) | |
1337 | { | |
1338 | CHECK_DATASIZE(); | |
1339 | insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); | |
1340 | } | |
1341 | ||
1342 | template<int datasize> | |
1343 | ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm) | |
1344 | { | |
1345 | ldrsh<datasize>(rt, rn, rm, UXTX, 0); | |
1346 | } | |
1347 | ||
1348 | template<int datasize> | |
1349 | ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) | |
1350 | { | |
1351 | CHECK_DATASIZE(); | |
1352 | ASSERT(!amount || amount == 1); | |
1353 | insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt)); | |
1354 | } | |
1355 | ||
1356 | template<int datasize> | |
1357 | ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm) | |
1358 | { | |
1359 | CHECK_DATASIZE(); | |
1360 | insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt)); | |
1361 | } | |
1362 | ||
1363 | template<int datasize> | |
1364 | ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm) | |
1365 | { | |
1366 | CHECK_DATASIZE(); | |
1367 | insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); | |
1368 | } | |
1369 | ||
1370 | template<int datasize> | |
1371 | ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm) | |
1372 | { | |
1373 | CHECK_DATASIZE(); | |
1374 | insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); | |
1375 | } | |
1376 | ||
1377 | ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm) | |
1378 | { | |
1379 | ldrsw(rt, rn, rm, UXTX, 0); | |
1380 | } | |
1381 | ||
1382 | ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) | |
1383 | { | |
1384 | ASSERT(!amount || amount == 2); | |
1385 | insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt)); | |
1386 | } | |
1387 | ||
1388 | ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm) | |
1389 | { | |
1390 | insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt)); | |
1391 | } | |
1392 | ||
1393 | ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm) | |
1394 | { | |
1395 | insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); | |
1396 | } | |
1397 | ||
1398 | ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm) | |
1399 | { | |
1400 | insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); | |
1401 | } | |
1402 | ||
1403 | ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0) | |
1404 | { | |
1405 | ASSERT(!(offset & 3)); | |
1406 | insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt)); | |
1407 | } | |
1408 | ||
1409 | template<int datasize> | |
1410 | ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm) | |
1411 | { | |
1412 | CHECK_DATASIZE(); | |
1413 | insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); | |
1414 | } | |
1415 | ||
1416 | ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm) | |
1417 | { | |
1418 | insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); | |
1419 | } | |
1420 | ||
1421 | ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm) | |
1422 | { | |
1423 | insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); | |
1424 | } | |
1425 | ||
1426 | template<int datasize> | |
1427 | ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm) | |
1428 | { | |
1429 | CHECK_DATASIZE(); | |
1430 | insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); | |
1431 | } | |
1432 | ||
1433 | template<int datasize> | |
1434 | ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm) | |
1435 | { | |
1436 | CHECK_DATASIZE(); | |
1437 | insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); | |
1438 | } | |
1439 | ||
1440 | ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm) | |
1441 | { | |
1442 | insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); | |
1443 | } | |
1444 | ||
1445 | template<int datasize> | |
1446 | ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift) | |
1447 | { | |
1448 | ASSERT(shift < datasize); | |
1449 | ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift); | |
1450 | } | |
1451 | ||
1452 | template<int datasize> | |
1453 | ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm) | |
1454 | { | |
1455 | lslv<datasize>(rd, rn, rm); | |
1456 | } | |
1457 | ||
1458 | template<int datasize> | |
1459 | ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm) | |
1460 | { | |
1461 | CHECK_DATASIZE(); | |
1462 | insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd)); | |
1463 | } | |
1464 | ||
1465 | template<int datasize> | |
1466 | ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift) | |
1467 | { | |
1468 | ASSERT(shift < datasize); | |
1469 | ubfm<datasize>(rd, rn, shift, datasize - 1); | |
1470 | } | |
1471 | ||
1472 | template<int datasize> | |
1473 | ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm) | |
1474 | { | |
1475 | lsrv<datasize>(rd, rn, rm); | |
1476 | } | |
1477 | ||
1478 | template<int datasize> | |
1479 | ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm) | |
1480 | { | |
1481 | CHECK_DATASIZE(); | |
1482 | insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd)); | |
1483 | } | |
1484 | ||
1485 | template<int datasize> | |
1486 | ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) | |
1487 | { | |
1488 | CHECK_DATASIZE(); | |
1489 | insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd)); | |
1490 | } | |
1491 | ||
1492 | template<int datasize> | |
1493 | ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm) | |
1494 | { | |
1495 | msub<datasize>(rd, rn, rm, ARM64Registers::zr); | |
1496 | } | |
1497 | ||
1498 | template<int datasize> | |
1499 | ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm) | |
1500 | { | |
1501 | if (isSp(rd) || isSp(rm)) | |
1502 | add<datasize>(rd, rm, UInt12(0)); | |
1503 | else | |
1504 | orr<datasize>(rd, ARM64Registers::zr, rm); | |
1505 | } | |
1506 | ||
1507 | template<int datasize> | |
1508 | ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm) | |
1509 | { | |
1510 | orr<datasize>(rd, ARM64Registers::zr, imm); | |
1511 | } | |
1512 | ||
1513 | template<int datasize> | |
1514 | ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0) | |
1515 | { | |
1516 | CHECK_DATASIZE(); | |
1517 | ASSERT(!(shift & 0xf)); | |
1518 | insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd)); | |
1519 | } | |
1520 | ||
1521 | template<int datasize> | |
1522 | ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0) | |
1523 | { | |
1524 | CHECK_DATASIZE(); | |
1525 | ASSERT(!(shift & 0xf)); | |
1526 | insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd)); | |
1527 | } | |
1528 | ||
1529 | template<int datasize> | |
1530 | ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0) | |
1531 | { | |
1532 | CHECK_DATASIZE(); | |
1533 | ASSERT(!(shift & 0xf)); | |
1534 | insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd)); | |
1535 | } | |
1536 | ||
1537 | template<int datasize> | |
1538 | ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) | |
1539 | { | |
1540 | CHECK_DATASIZE(); | |
1541 | insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd)); | |
1542 | } | |
1543 | ||
1544 | template<int datasize> | |
1545 | ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm) | |
1546 | { | |
1547 | madd<datasize>(rd, rn, rm, ARM64Registers::zr); | |
1548 | } | |
1549 | ||
1550 | template<int datasize> | |
1551 | ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm) | |
1552 | { | |
1553 | orn<datasize>(rd, ARM64Registers::zr, rm); | |
1554 | } | |
1555 | ||
1556 | template<int datasize> | |
1557 | ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount) | |
1558 | { | |
1559 | orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount); | |
1560 | } | |
1561 | ||
1562 | template<int datasize, SetFlags setFlags = DontSetFlags> | |
1563 | ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm) | |
1564 | { | |
1565 | sub<datasize, setFlags>(rd, ARM64Registers::zr, rm); | |
1566 | } | |
1567 | ||
1568 | template<int datasize, SetFlags setFlags = DontSetFlags> | |
1569 | ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount) | |
1570 | { | |
1571 | sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount); | |
1572 | } | |
1573 | ||
1574 | template<int datasize, SetFlags setFlags = DontSetFlags> | |
1575 | ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm) | |
1576 | { | |
1577 | sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm); | |
1578 | } | |
1579 | ||
1580 | template<int datasize, SetFlags setFlags = DontSetFlags> | |
1581 | ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount) | |
1582 | { | |
1583 | sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount); | |
1584 | } | |
1585 | ||
1586 | ALWAYS_INLINE void nop() | |
1587 | { | |
1588 | insn(nopPseudo()); | |
1589 | } | |
1590 | ||
1591 | template<int datasize> | |
1592 | ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm) | |
1593 | { | |
1594 | orn<datasize>(rd, rn, rm, LSL, 0); | |
1595 | } | |
1596 | ||
1597 | template<int datasize> | |
1598 | ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) | |
1599 | { | |
1600 | CHECK_DATASIZE(); | |
1601 | insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd)); | |
1602 | } | |
1603 | ||
1604 | template<int datasize> | |
1605 | ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm) | |
1606 | { | |
1607 | orr<datasize>(rd, rn, rm, LSL, 0); | |
1608 | } | |
1609 | ||
1610 | template<int datasize> | |
1611 | ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) | |
1612 | { | |
1613 | CHECK_DATASIZE(); | |
1614 | insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd)); | |
1615 | } | |
1616 | ||
1617 | template<int datasize> | |
1618 | ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm) | |
1619 | { | |
1620 | CHECK_DATASIZE(); | |
1621 | insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd)); | |
1622 | } | |
1623 | ||
1624 | template<int datasize> | |
1625 | ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn) | |
1626 | { | |
1627 | CHECK_DATASIZE(); | |
1628 | insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd)); | |
1629 | } | |
1630 | ||
1631 | ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr) | |
1632 | { | |
1633 | insn(unconditionalBranchRegister(BranchType_RET, rn)); | |
1634 | } | |
1635 | ||
1636 | template<int datasize> | |
1637 | ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn) | |
1638 | { | |
1639 | CHECK_DATASIZE(); | |
1640 | if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width. | |
1641 | insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd)); | |
1642 | else | |
1643 | insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd)); | |
1644 | } | |
1645 | ||
1646 | template<int datasize> | |
1647 | ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn) | |
1648 | { | |
1649 | CHECK_DATASIZE(); | |
1650 | insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd)); | |
1651 | } | |
1652 | ||
1653 | template<int datasize> | |
1654 | ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn) | |
1655 | { | |
1656 | ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands. | |
1657 | insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd)); | |
1658 | } | |
1659 | ||
1660 | template<int datasize> | |
1661 | ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm) | |
1662 | { | |
1663 | rorv<datasize>(rd, rn, rm); | |
1664 | } | |
1665 | ||
1666 | template<int datasize> | |
1667 | ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift) | |
1668 | { | |
1669 | extr<datasize>(rd, rs, rs, shift); | |
1670 | } | |
1671 | ||
1672 | template<int datasize> | |
1673 | ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm) | |
1674 | { | |
1675 | CHECK_DATASIZE(); | |
1676 | insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd)); | |
1677 | } | |
1678 | ||
1679 | template<int datasize, SetFlags setFlags = DontSetFlags> | |
1680 | ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm) | |
1681 | { | |
1682 | CHECK_DATASIZE(); | |
1683 | insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd)); | |
1684 | } | |
1685 | ||
1686 | template<int datasize> | |
1687 | ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width) | |
1688 | { | |
1689 | sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1); | |
1690 | } | |
1691 | ||
1692 | template<int datasize> | |
1693 | ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms) | |
1694 | { | |
1695 | CHECK_DATASIZE(); | |
1696 | insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd)); | |
1697 | } | |
1698 | ||
1699 | template<int datasize> | |
1700 | ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width) | |
1701 | { | |
1702 | sbfm<datasize>(rd, rn, lsb, lsb + width - 1); | |
1703 | } | |
1704 | ||
1705 | template<int datasize> | |
1706 | ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm) | |
1707 | { | |
1708 | CHECK_DATASIZE(); | |
1709 | insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd)); | |
1710 | } | |
1711 | ||
1712 | ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) | |
1713 | { | |
1714 | insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd)); | |
1715 | } | |
1716 | ||
1717 | ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm) | |
1718 | { | |
1719 | smsubl(rd, rn, rm, ARM64Registers::zr); | |
1720 | } | |
1721 | ||
1722 | ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) | |
1723 | { | |
1724 | insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd)); | |
1725 | } | |
1726 | ||
1727 | ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm) | |
1728 | { | |
1729 | insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd)); | |
1730 | } | |
1731 | ||
1732 | ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm) | |
1733 | { | |
1734 | smaddl(rd, rn, rm, ARM64Registers::zr); | |
1735 | } | |
1736 | ||
// STR (register offset), convenience form: store rt to [rn + rm].
template<int datasize>
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
{
    str<datasize>(rt, rn, rm, UXTX, 0);
}

// STR (register offset): store rt to [rn + extend(rm) << amount].
template<int datasize>
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_DATASIZE();
    insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
}

// STR (unsigned scaled immediate): store rt to [rn + pimm]; pimm must be a
// multiple of the access size (checked by encodePositiveImmediate).
template<int datasize>
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
{
    CHECK_DATASIZE();
    insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
}

// STR (post-index): store rt to [rn], then rn += simm.
template<int datasize>
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
{
    CHECK_DATASIZE();
    insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
}

// STR (pre-index): rn += simm, then store rt to [rn].
template<int datasize>
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
{
    CHECK_DATASIZE();
    insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
}
1770 | ||
// STRB (register offset), convenience form: store low byte of rt to [rn + rm].
ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
{
    // Not calling the 5 argument form of strb, since when amount is omitted S is false.
    insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
}

// STRB (register offset with extend). Byte accesses take no shift, so amount
// must be 0; S = true encodes the explicit "#0" shift form.
ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    ASSERT_UNUSED(amount, !amount);
    insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
}

// STRB (unsigned immediate): store low byte of rt to [rn + pimm].
ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
{
    insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
}

// STRB (post-index): store byte to [rn], then rn += simm.
ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
{
    insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
}

// STRB (pre-index): rn += simm, then store byte to [rn].
ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
{
    insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
}
1797 | ||
// STRH (register offset), convenience form: store low halfword of rt to [rn + rm].
ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
{
    strh(rt, rn, rm, UXTX, 0);
}

// STRH (register offset with extend): halfword shift amount may only be 0 or 1.
ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    ASSERT(!amount || amount == 1);
    insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
}

// STRH (unsigned immediate): store low halfword of rt to [rn + pimm].
ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
{
    insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
}

// STRH (post-index): store halfword to [rn], then rn += simm.
ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
{
    insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
}

// STRH (pre-index): rn += simm, then store halfword to [rn].
ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
{
    insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
}
1823 | ||
// STUR: store register with unscaled (byte-granular, signed 9-bit) immediate offset.
template<int datasize>
ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
{
    CHECK_DATASIZE();
    insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
}

// STURB: store byte with unscaled immediate offset.
ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
{
    insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
}

// STURH: store halfword with unscaled immediate offset.
ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
{
    insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
}
1840 | ||
// SUB (immediate): rd = rn - (imm12 << shift); shift may only be 0 or 12.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
{
    CHECK_DATASIZE();
    ASSERT(!shift || shift == 12);
    insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
}

// SUB (register), convenience form: rd = rn - rm.
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
{
    sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
}

// SUB (extended register): rd = rn - (extend(rm) << amount).
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_DATASIZE();
    insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
}

// SUB (shifted register): rd = rn - (rm shift amount). The shifted-register
// encoding cannot use SP as rn, so that case is rerouted to the
// extended-register form (LSL on SP is equivalent to UXTX).
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
{
    CHECK_DATASIZE();
    if (isSp(rn)) {
        ASSERT(shift == LSL);
        sub<datasize, setFlags>(rd, rn, rm, UXTX, amount);
    } else
        insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
}
1872 | ||
// SXTB: sign-extend the low byte of rn into rd (alias of SBFM #0, #7).
template<int datasize>
ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
{
    sbfm<datasize>(rd, rn, 0, 7);
}

// SXTH: sign-extend the low halfword of rn into rd (alias of SBFM #0, #15).
template<int datasize>
ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
{
    sbfm<datasize>(rd, rn, 0, 15);
}

// SXTW: sign-extend the low word of rn into 64-bit rd (alias of SBFM #0, #31).
ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
{
    sbfm<64>(rd, rn, 0, 31);
}
1889 | ||
// TBZ: test bit <imm> of rt and branch if zero. |offset| is a byte offset;
// it must be 4-byte aligned and is encoded in instruction (word) units.
ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
{
    ASSERT(!(offset & 3));
    offset >>= 2;
    insn(testAndBranchImmediate(false, imm, offset, rt));
}

// TBNZ: test bit <imm> of rt and branch if non-zero; offset as for tbz.
ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
{
    ASSERT(!(offset & 3));
    offset >>= 2;
    insn(testAndBranchImmediate(true, imm, offset, rt));
}
1903 | ||
// TST (register): sets flags on rn & rm; alias of ANDS with zr destination.
template<int datasize>
ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
{
    and_<datasize, S>(ARM64Registers::zr, rn, rm);
}

// TST (shifted register): sets flags on rn & (rm shift amount).
template<int datasize>
ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
{
    and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
}

// TST (immediate): sets flags on rn & imm.
template<int datasize>
ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
{
    and_<datasize, S>(ARM64Registers::zr, rn, imm);
}
1921 | ||
// UBFIZ: unsigned bitfield insert in zero — place the low <width> bits of rn
// at bit <lsb> of rd, zeroing the rest. The immr rotation is computed mod datasize.
template<int datasize>
ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
{
    ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
}

// UBFM: unsigned bitfield move — the primitive behind ubfiz/ubfx/uxt*.
template<int datasize>
ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
{
    CHECK_DATASIZE();
    insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
}

// UBFX: unsigned bitfield extract — copies <width> bits of rn starting at
// <lsb> into the low bits of rd, zero-extending.
template<int datasize>
ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
{
    ubfm<datasize>(rd, rn, lsb, lsb + width - 1);
}
1940 | ||
// UDIV: unsigned integer divide, rd = rn / rm.
template<int datasize>
ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
{
    CHECK_DATASIZE();
    insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
}
1947 | ||
// UMADDL: unsigned multiply-add long — rd = ra + (rn * rm), 32-bit sources, 64-bit result.
ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
    insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
}

// UMNEGL: unsigned multiply-negate long; alias of UMSUBL with ra = zr.
ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
{
    umsubl(rd, rn, rm, ARM64Registers::zr);
}

// UMSUBL: unsigned multiply-subtract long — rd = ra - (rn * rm).
ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
    insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
}

// UMULH: high 64 bits of the unsigned 128-bit product of rn and rm.
ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
{
    insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
}

// UMULL: unsigned multiply long; alias of UMADDL with ra = zr.
ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
{
    umaddl(rd, rn, rm, ARM64Registers::zr);
}
1972 | ||
// UXTB: zero-extend the low byte of rn into rd (alias of UBFM #0, #7).
template<int datasize>
ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
{
    ubfm<datasize>(rd, rn, 0, 7);
}

// UXTH: zero-extend the low halfword of rn into rd (alias of UBFM #0, #15).
template<int datasize>
ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
{
    ubfm<datasize>(rd, rn, 0, 15);
}

// UXTW: zero-extend the low word of rn into 64-bit rd (UBFM #0, #31).
ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
{
    ubfm<64>(rd, rn, 0, 31);
}
1989 | ||
1990 | // Floating Point Instructions: | |
1991 | ||
// FABS: floating-point absolute value, vd = |vn|.
template<int datasize>
ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
}

// FADD: floating-point add, vd = vn + vm.
template<int datasize>
ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
}
2005 | ||
// FCCMP: conditional FP compare — compares vn with vm if cond holds,
// otherwise sets NZCV to the supplied nzcv value.
template<int datasize>
ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
{
    CHECK_DATASIZE();
    insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
}

// FCCMPE: as fccmp, but the signalling variant (FCMPE encoding).
template<int datasize>
ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
{
    CHECK_DATASIZE();
    insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
}

// FCMP: floating-point compare vn with vm, setting NZCV.
template<int datasize>
ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
}

// FCMP (zero form): compare vn with +0.0; the vm field is encoded as 0.
template<int datasize>
ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0));
}

// FCMPE: signalling compare of vn with vm.
template<int datasize>
ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
}

// FCMPE (zero form): signalling compare of vn with +0.0.
template<int datasize>
ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0));
}
2047 | ||
// FCSEL: conditional select — vd = cond ? vn : vm.
template<int datasize>
ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
{
    CHECK_DATASIZE();
    insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
}
2054 | ||
// FCVT: convert between FP precisions (half/single/double). The source size
// selects the "type" field and the destination size the opcode; converting to
// the same size is invalid.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn)
{
    ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64);
    ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64);
    ASSERT(dstsize != srcsize);
    Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16;
    FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf;
    insn(floatingPointDataProcessing1Source(type, opcode, vn, vd));
}
2065 | ||
// FCVTAS: FP to signed integer, rounding to nearest with ties away from zero.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
}

// FCVTAU: FP to unsigned integer, rounding to nearest with ties away from zero.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
}

// FCVTMS: FP to signed integer, rounding toward minus infinity.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
}

// FCVTMU: FP to unsigned integer, rounding toward minus infinity.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
}

// FCVTNS: FP to signed integer, rounding to nearest with ties to even.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
}

// FCVTNU: FP to unsigned integer, rounding to nearest with ties to even.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
}

// FCVTPS: FP to signed integer, rounding toward plus infinity.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
}

// FCVTPU: FP to unsigned integer, rounding toward plus infinity.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
}

// FCVTZS: FP to signed integer, rounding toward zero (C-style truncation).
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
}

// FCVTZU: FP to unsigned integer, rounding toward zero.
template<int dstsize, int srcsize>
ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
}
2145 | ||
// FDIV: floating-point divide, vd = vn / vm.
template<int datasize>
ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
}

// FMADD: fused multiply-add, vd = va + vn * vm.
template<int datasize>
ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
}

// FMAX: floating-point maximum of vn and vm.
template<int datasize>
ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
}

// FMAXNM: maximum with NaN-ignoring (maxNum) semantics.
template<int datasize>
ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
}

// FMIN: floating-point minimum of vn and vm.
template<int datasize>
ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
}

// FMINNM: minimum with NaN-ignoring (minNum) semantics.
template<int datasize>
ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
}
2187 | ||
// FMOV (register): copy vn to vd without conversion.
template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
}

// FMOV (general to FP): move the raw bits of GPR rn into vd.
template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE();
    insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
}

// FMOV (FP to general): move the raw bits of vn into GPR rd.
template<int datasize>
ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
}

// FMOV (immediate): load an immediate encodable as an 8-bit FP constant
// (encodeFPImm is expected to validate/encode it).
template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
{
    CHECK_DATASIZE();
    insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
}

// FMOV to the top 64 bits of the 128-bit vector register (V<d>.D[1]).
ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
{
    insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
}

// FMOV from the top 64 bits of the 128-bit vector register (V<n>.D[1]).
ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
{
    insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
}
2225 | ||
// FMSUB: fused multiply-subtract, vd = va - vn * vm.
template<int datasize>
ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
}

// FMUL: floating-point multiply, vd = vn * vm.
template<int datasize>
ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
}

// FNEG: floating-point negate, vd = -vn.
template<int datasize>
ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
}

// FNMADD: negated fused multiply-add, vd = -(va + vn * vm).
template<int datasize>
ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
}

// FNMSUB: negated fused multiply-subtract, vd = -(va - vn * vm).
template<int datasize>
ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
}

// FNMUL: floating-point multiply-negate, vd = -(vn * vm).
template<int datasize>
ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
}
2267 | ||
// FRINTA: round to integral, ties away from zero.
template<int datasize>
ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
}

// FRINTI: round to integral using the current FPCR rounding mode.
template<int datasize>
ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
}

// FRINTM: round to integral toward minus infinity (floor).
template<int datasize>
ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
}

// FRINTN: round to integral, ties to even.
template<int datasize>
ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
}

// FRINTP: round to integral toward plus infinity (ceil).
template<int datasize>
ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
}

// FRINTX: round to integral (current mode), raising Inexact when the value changes.
template<int datasize>
ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
}

// FRINTZ: round to integral toward zero (truncate).
template<int datasize>
ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
}
2316 | ||
// FSQRT: floating-point square root, vd = sqrt(vn).
template<int datasize>
ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
}

// FSUB: floating-point subtract, vd = vn - vm.
template<int datasize>
ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
}
2330 | ||
// LDR (FP/SIMD, register offset), convenience form: load [rn + rm] into rt.
template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
{
    ldr<datasize>(rt, rn, rm, UXTX, 0);
}

// LDR (FP/SIMD, register offset with extend). datasize may also be 128,
// selecting the V128 load opcode.
template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
}

// LDR (FP/SIMD, unsigned scaled immediate): load [rn + pimm] into rt.
template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
}

// LDR (FP/SIMD, post-index): load [rn] into rt, then rn += simm.
template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}

// LDR (FP/SIMD, pre-index): rn += simm, then load [rn] into rt.
template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}
2364 | ||
// LDR (literal, FP/SIMD): PC-relative load of a 32/64/128-bit literal into rt.
// |offset| is a byte offset; it must be 4-byte aligned and is encoded in words.
template<int datasize>
ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
{
    CHECK_FP_MEMOP_DATASIZE();
    ASSERT(datasize >= 32); // No 8/16-bit literal-load encodings exist.
    ASSERT(!(offset & 3));
    insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
}
2373 | ||
// LDUR (FP/SIMD): load with unscaled (byte-granular, signed 9-bit) immediate offset.
template<int datasize>
ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}
2380 | ||
// SCVTF: signed integer (GPR rn, srcsize bits) to floating point (vd, dstsize bits).
// Note the operand order: the conversion encoder takes the GPR size first.
template<int dstsize, int srcsize>
ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
}
2388 | ||
// STR (FP/SIMD, register offset), convenience form: store rt to [rn + rm].
template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
{
    str<datasize>(rt, rn, rm, UXTX, 0);
}

// STR (FP/SIMD, register offset with extend). datasize may also be 128,
// selecting the V128 store opcode.
template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
}

// STR (FP/SIMD, unsigned scaled immediate): store rt to [rn + pimm].
template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
}

// STR (FP/SIMD, post-index): store rt to [rn], then rn += simm.
template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
}

// STR (FP/SIMD, pre-index): rn += simm, then store rt to [rn].
template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
}
2422 | ||
2423 | template<int datasize> | |
2424 | ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm) | |
2425 | { | |
2426 | CHECK_DATASIZE(); | |
2427 | insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt)); | |
2428 | } | |
2429 | ||
// UCVTF: unsigned integer (GPR rn, srcsize bits) to floating point (vd, dstsize bits).
// Note the operand order: the conversion encoder takes the GPR size first.
template<int dstsize, int srcsize>
ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
}
2437 | ||
2438 | // Admin methods: | |
2439 | ||
// Returns the current buffer position without the watchpoint padding that
// label() performs.
AssemblerLabel labelIgnoringWatchpoints()
{
    return m_buffer.label();
}
2444 | ||
// Returns a label suitable for a watchpoint and records its position. If the
// current offset coincides with the previous watchpoint, padding via label()
// is skipped; otherwise label() is used, which nop-pads past the previous
// watchpoint's tail so later jump replacement cannot overlap two watchpoints.
AssemblerLabel labelForWatchpoint()
{
    AssemblerLabel result = m_buffer.label();
    if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
        result = label();
    m_indexOfLastWatchpoint = result.m_offset;
    m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
    return result;
}
2454 | ||
// Returns the current position, emitting nops until it is past the tail of
// the last watchpoint (so code at this label cannot be clobbered when the
// watchpoint is patched into a jump).
AssemblerLabel label()
{
    AssemblerLabel result = m_buffer.label();
    while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
        nop();
        result = m_buffer.label();
    }
    return result;
}
2464 | ||
// Pads the buffer with BRK #0 instructions until it reaches the requested
// alignment (which must be a multiple of the 4-byte instruction size), then
// returns a label at the aligned position.
AssemblerLabel align(int alignment)
{
    ASSERT(!(alignment & 3));
    while (!m_buffer.isAligned(alignment))
        brk(0);
    return label();
}
2472 | ||
// Translates a label into an absolute address within the relocated copy of
// the code starting at |code|.
static void* getRelocatedAddress(void* code, AssemblerLabel label)
{
    ASSERT(label.isSet());
    return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
}
2478 | ||
// Byte distance from label a to label b (positive when b is later).
static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
{
    return b.m_offset - a.m_offset;
}
2483 | ||
// Returns the executable-copy offset recorded for the given buffer location.
// Reads the 32-bit word immediately preceding |location| in the buffer —
// assumes the link/copy machinery stores offsets there; location 0 has no
// preceding word and maps to offset 0. (NOTE(review): storage convention is
// defined by the linker code, not visible here — confirm there.)
int executableOffsetFor(int location)
{
    if (!location)
        return 0;
    return static_cast<int32_t*>(m_buffer.data())[location / sizeof(int32_t) - 1];
}
2490 | ||
// Copies the assembled code into executable memory; forwards to the buffer.
PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
{
    return m_buffer.executableCopy(vm, ownerUID, effort);
}
2495 | ||
void* unlinkedCode() { return m_buffer.data(); } // Raw pointer to the not-yet-linked code in the assembly buffer.
size_t codeSize() const { return m_buffer.codeSize(); } // Bytes of code emitted so far.
2498 | ||
// The buffer offset of the instruction following a call — i.e. its return address.
static unsigned getCallReturnOffset(AssemblerLabel call)
{
    ASSERT(call.isSet());
    return call.m_offset;
}
2504 | ||
// Linking & patching:
//
// 'link' and 'patch' methods are for use on unprotected code - such as the code
// within the AssemblerBuffer, and code being patched by the patch buffer. Once
// code has been finalized it is (platform support permitting) within a non-
// writable region of memory; to modify the code in an execute-only executable
// pool the 'repatch' and 'relink' methods should be used.
2512 | ||
// Records a conditional jump to be linked when the code is finalized.
void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
}

// Records a compare-and-branch style jump (carries operand width and the
// register being compared) to be linked later.
void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
}

// Records a test-bit-and-branch style jump (carries the bit number and the
// register being tested) to be linked later.
void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
}

// Immediately links a jump between two labels inside the assembly buffer.
void linkJump(AssemblerLabel from, AssemblerLabel to)
{
    ASSERT(from.isSet());
    ASSERT(to.isSet());
    relinkJumpOrCall<false>(addressOf(from), addressOf(to));
}

// Immediately links a jump in the copied-out code at |code| to address |to|.
static void linkJump(void* code, AssemblerLabel from, void* to)
{
    ASSERT(from.isSet());
    relinkJumpOrCall<false>(addressOf(code, from), to);
}
2546 | ||
2547 | static void linkCall(void* code, AssemblerLabel from, void* to) | |
2548 | { | |
2549 | ASSERT(from.isSet()); | |
2550 | linkJumpOrCall<true>(addressOf(code, from) - 1, to); | |
2551 | } | |
2552 | ||
2553 | static void linkPointer(void* code, AssemblerLabel where, void* valuePtr) | |
2554 | { | |
2555 | linkPointer(addressOf(code, where), valuePtr); | |
2556 | } | |
2557 | ||
    // Overwrite the single instruction at 'where' with an unconditional branch
    // to 'to'. The displacement is in 4-byte instruction units.
    static void replaceWithJump(void* where, void* to)
    {
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);
        *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset));
        cacheFlush(where, sizeof(int));
    }

    // A replacement jump is always exactly one 4-byte instruction on ARM64.
    static ptrdiff_t maxJumpReplacementSize()
    {
        return 4;
    }

    // Turn the 64-bit ADD (immediate) at 'where' into the equivalent 64-bit
    // load (same base register, same scaled offset). If the instruction is
    // already a load, just assert it has the expected shape (idempotent).
    static void replaceWithLoad(void* where)
    {
        Datasize sf;
        AddOp op;
        SetFlags S;
        int shift;
        int imm12;
        RegisterID rn;
        RegisterID rd;
        if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
            ASSERT(sf == Datasize_64);
            ASSERT(op == AddOp_ADD);
            ASSERT(!S);
            ASSERT(!shift);
            ASSERT(!(imm12 & ~0xff8)); // Offset must be 8-byte aligned and encodable.
            *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
            cacheFlush(where, sizeof(int));
        }
#if !ASSERT_DISABLED
        else {
            // Not an ADD: must already be the load form this method produces.
            MemOpSize size;
            bool V;
            MemOp opc;
            int imm12;
            RegisterID rn;
            RegisterID rt;
            ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
            ASSERT(size == MemOpSize_64);
            ASSERT(!V);
            ASSERT(opc == MemOp_LOAD);
            ASSERT(!(imm12 & ~0x1ff));
        }
#endif
    }
2605 | ||
    // Inverse of replaceWithLoad: turn the 64-bit load at 'where' back into the
    // ADD (immediate) that computes the address. If the instruction is already
    // an ADD, just assert it has the expected shape (idempotent).
    static void replaceWithAddressComputation(void* where)
    {
        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
            ASSERT(size == MemOpSize_64);
            ASSERT(!V);
            ASSERT(opc == MemOp_LOAD);
            ASSERT(!(imm12 & ~0x1ff));
            // Load offsets are scaled by the access size; rescale to bytes.
            *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
            cacheFlush(where, sizeof(int));
        }
#if !ASSERT_DISABLED
        else {
            Datasize sf;
            AddOp op;
            SetFlags S;
            int shift;
            int imm12;
            RegisterID rn;
            RegisterID rd;
            ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
            ASSERT(sf == Datasize_64);
            ASSERT(op == AddOp_ADD);
            ASSERT(!S);
            ASSERT(!shift);
            ASSERT(!(imm12 & ~0xff8));
        }
#endif
    }

    // Repatch a previously linked pointer-materialization, flushing the cache.
    static void repatchPointer(void* where, void* valuePtr)
    {
        linkPointer(static_cast<int*>(where), valuePtr, true);
    }

    // Rewrite the MOVZ/MOVK/MOVK triple at 'address' to load 'valuePtr' into
    // 'rd'. Only the low three halfwords (48 bits) are written -- assumes
    // pointers fit in 48 bits on this target (TODO confirm for new targets).
    static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
    {
        uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
        address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
        address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);

        if (flush)
            cacheFlush(address, sizeof(int) * 3);
    }
2656 | ||
    // Repatch a 32-bit immediate materialized as a two-instruction move-wide
    // pair. Non-negative values use MOVZ+MOVK; negative values use MOVN (with
    // the first halfword complemented) + MOVK.
    static void repatchInt32(void* where, int32_t value)
    {
        int* address = static_cast<int*>(where);

        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
        // First instruction must be a 32-bit MOVZ or MOVN with hw == 0.
        ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
        ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));

        if (value >= 0) {
            address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        } else {
            address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
        }

        cacheFlush(where, sizeof(int) * 2);
    }

    // Reassemble the 48-bit pointer from the MOVZ/MOVK/MOVK triple at 'where'
    // (the same layout setPointer writes).
    static void* readPointer(void* where)
    {
        int* address = static_cast<int*>(where);

        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rdFirst, rd;

        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
        uintptr_t result = imm16;

        expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
        result |= static_cast<uintptr_t>(imm16) << 16;

        expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
        result |= static_cast<uintptr_t>(imm16) << 32;

        return reinterpret_cast<void*>(result);
    }

    // The pointer materialization sits four instructions before the call's
    // return point ('from').
    static void* readCallTarget(void* from)
    {
        return readPointer(reinterpret_cast<int*>(from) - 4);
    }

    // Retarget an already-finalized jump / call, flushing the patched word.
    static void relinkJump(void* from, void* to)
    {
        relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
        cacheFlush(from, sizeof(int));
    }

    static void relinkCall(void* from, void* to)
    {
        // 'from' is the return point; the BL is the instruction before it.
        relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to);
        cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
    }
2722 | ||
    // Repatch the scaled offset of a compact (unsigned-immediate) 32/64-bit
    // load to a general-purpose register.
    static void repatchCompact(void* where, int32_t value)
    {
        ASSERT(!(value & ~0x3ff8)); // 8-byte aligned, within the compact range.

        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
        ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.

        if (size == MemOpSize_32)
            imm12 = encodePositiveImmediate<32>(value);
        else
            imm12 = encodePositiveImmediate<64>(value);
        *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);

        cacheFlush(where, sizeof(int));
    }

    unsigned debugOffset() { return m_buffer.debugOffset(); }

    // Make freshly written instructions visible to the instruction fetch path.
    static void cacheFlush(void* code, size_t size)
    {
#if OS(IOS)
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#else
#error "The cacheFlush support is missing on this platform."
#endif
    }

    // Assembler admin methods:

    // Bytes saved by emitting 'jumpLinkType' instead of worst-case 'jumpType'.
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }

    // Orders link records by source offset for the branch-compaction pass.
    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }

    bool canCompact(JumpType jumpType)
    {
        // Fixed jumps cannot be compacted
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
    }
2770 | ||
    // Decide how a recorded jump will actually be emitted: fixed-size forms map
    // straight to their link type; compactable forms pick the short "direct"
    // variant when the displacement fits in the branch's immediate field.
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        switch (jumpType) {
        case JumpFixed:
            return LinkInvalid;
        case JumpNoConditionFixedSize:
            return LinkJumpNoCondition;
        case JumpConditionFixedSize:
            return LinkJumpCondition;
        case JumpCompareAndBranchFixedSize:
            return LinkJumpCompareAndBranch;
        case JumpTestBitFixedSize:
            return LinkJumpTestBit;
        case JumpNoCondition:
            return LinkJumpNoCondition;
        case JumpCondition: {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            // Shift-and-restore checks the byte displacement fits in 21 signed
            // bits, i.e. the conditional branch's imm19 field (x4 byte units).
            if (((relative << 43) >> 43) == relative)
                return LinkJumpConditionDirect;

            return LinkJumpCondition;
        }
        case JumpCompareAndBranch:  {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            if (((relative << 43) >> 43) == relative)
                return LinkJumpCompareAndBranchDirect;

            return LinkJumpCompareAndBranch;
        }
        case JumpTestBit:   {
            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

            // 14 signed bits for TBZ/TBNZ's shorter imm14 range.
            if (((relative << 50) >> 50) == relative)
                return LinkJumpTestBitDirect;

            return LinkJumpTestBit;
        }
        default:
            ASSERT_NOT_REACHED();
        }

        return LinkJumpNoCondition;
    }

    // Convenience overload: compute and remember the link type on the record.
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }

    // During compaction the instruction buffer doubles as a per-word table of
    // relocation offsets; fill [regionStart, regionEnd) slots with 'offset'.
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
    {
        int32_t ptr = regionStart / sizeof(int32_t);
        const int32_t end = regionEnd / sizeof(int32_t);
        int32_t* offsets = static_cast<int32_t*>(m_buffer.data());
        while (ptr < end)
            offsets[ptr++] = offset;
    }

    // Jump records, sorted by source offset, ready for the link pass.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
2844 | ||
    // Emit the final branch for one link record. "Direct" variants patch the
    // branch in place; the long variants start one instruction earlier because
    // they occupy a two-instruction slot.
    void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpNoCondition:
            linkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpConditionDirect:
            linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpCondition:
            linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to);
            break;
        case LinkJumpCompareAndBranchDirect:
            linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpCompareAndBranch:
            linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
            break;
        case LinkJumpTestBitDirect:
            linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to);
            break;
        case LinkJumpTestBit:
            linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
            break;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }
2874 | ||
2875 | private: | |
    // Debug helper: is 'insn' a MOVK of the given size with the expected
    // halfword index and destination register?
    template<Datasize size>
    static bool checkMovk(int insn, int _hw, RegisterID _rd)
    {
        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);

        return expected &&
            sf == size &&
            opc == MoveWideOp_K &&
            hw == _hw &&
            rd == _rd;
    }

    // Patch an existing MOVZ/MOVK/MOVK pointer materialization at 'address'.
    // Asserts the three-instruction shape before overwriting it.
    static void linkPointer(int* address, void* valuePtr, bool flush = false)
    {
        Datasize sf;
        MoveWideOp opc;
        int hw;
        uint16_t imm16;
        RegisterID rd;
        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
        ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
        ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));

        setPointer(address, valuePtr, rd, flush);
    }

    // Write an unconditional B (or BL when isCall) at 'from' targeting 'to'.
    // The slot must currently hold a B/BL or a NOP placeholder.
    template<bool isCall>
    static void linkJumpOrCall(int* from, void* to)
    {
        bool link;
        int imm26;
        bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);

        ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
        ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        // Displacement in 4-byte instruction units; must fit in an int.
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);

        *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
    }
2924 | ||
    // Link a CBZ/CBNZ. Direct form: single branch (plus a NOP to fill the
    // reserved second slot when linking into a two-instruction slot). Indirect
    // form: inverted-condition branch over an unconditional B to the target.
    template<bool isDirect>
    static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
            linkJumpOrCall<false>(from + 1, to);
        }
    }

    // Link a B.cond; same direct/indirect layout strategy as above.
    template<bool isDirect>
    static void linkConditionalBranch(Condition condition, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = conditionalBranchImmediate(static_cast<int>(offset), condition);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = conditionalBranchImmediate(2, invert(condition));
            linkJumpOrCall<false>(from + 1, to);
        }
    }

    // Link a TBZ/TBNZ; its immediate field is only 14 bits, hence the tighter
    // direct-range check.
    template<bool isDirect>
    static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
        ASSERT(static_cast<int>(offset) == offset);
        ASSERT(((offset << 38) >> 38) == offset);

        bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
        ASSERT(!isDirect || useDirect);

        if (useDirect || isDirect) {
            *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
            if (!isDirect)
                *(from + 1) = nopPseudo();
        } else {
            *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
            linkJumpOrCall<false>(from + 1, to);
        }
    }
2988 | ||
    // Retarget a previously linked jump or call. If the slot holds a NOP, the
    // real branch is the compactable conditional / compare / test-bit branch
    // one instruction earlier: re-link that whole pair instead, re-deriving
    // the original (non-inverted) condition from the stored instruction.
    template<bool isCall>
    static void relinkJumpOrCall(int* from, void* to)
    {
        if (!isCall && disassembleNop(from)) {
            unsigned op01;
            int imm19;
            Condition condition;
            bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);

            if (isConditionalBranchImmediate) {
                // ASSERT not reached in release. Fail-safe for release builds.
                ASSERT_UNUSED(op01, !op01);
                ASSERT_UNUSED(isCall, !isCall);

                // An offset of 8 marks the inverted branch-over form, so undo
                // the inversion before re-linking.
                // NOTE(review): pairs with the indirect sequence emitted by
                // linkConditionalBranch -- verify offset units against it.
                if (imm19 == 8)
                    condition = invert(condition);

                linkConditionalBranch<false>(condition, from - 1, to);
                return;
            }

            Datasize opSize;
            bool op;
            RegisterID rt;
            bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);

            if (isCompareAndBranchImmediate) {
                if (imm19 == 8)
                    op = !op;

                linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to);
                return;
            }

            int imm14;
            unsigned bitNumber;
            bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);

            if (isTestAndBranchImmediate) {
                if (imm14 == 8)
                    op = !op;

                linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to);
                return;
            }
        }

        // Plain unconditional branch / call slot.
        linkJumpOrCall<isCall>(from, to);
    }

    // Address of the instruction at 'label' within the copied code at 'code'.
    static int* addressOf(void* code, AssemblerLabel label)
    {
        return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
    }

    // Address of the instruction at 'label' within this assembler's buffer.
    int* addressOf(AssemblerLabel label)
    {
        return addressOf(m_buffer.data(), label);
    }
3047 | ||
    // Decode register number 31, which encodes SP or ZR depending on context.
    static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
    static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
    static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
3051 | ||
3052 | static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd) | |
3053 | { | |
3054 | int insn = *static_cast<int*>(address); | |
3055 | sf = static_cast<Datasize>((insn >> 31) & 1); | |
3056 | op = static_cast<AddOp>((insn >> 30) & 1); | |
3057 | S = static_cast<SetFlags>((insn >> 29) & 1); | |
3058 | shift = (insn >> 22) & 3; | |
3059 | imm12 = (insn >> 10) & 0x3ff; | |
3060 | rn = disassembleXOrSp((insn >> 5) & 0x1f); | |
3061 | rd = disassembleXOrZrOrSp(S, insn & 0x1f); | |
3062 | return (insn & 0x1f000000) == 0x11000000; | |
3063 | } | |
3064 | ||
    // Decode a load/store (unsigned scaled immediate). Returns true iff the
    // instruction matches that encoding class.
    static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
    {
        int insn = *static_cast<int*>(address);
        size = static_cast<MemOpSize>((insn >> 30) & 3);
        V = (insn >> 26) & 1; // Vector (FP/SIMD) bit.
        opc = static_cast<MemOp>((insn >> 22) & 3);
        imm12 = (insn >> 10) & 0xfff;
        rn = disassembleXOrSp((insn >> 5) & 0x1f);
        rt = disassembleXOrZr(insn & 0x1f);
        return (insn & 0x3b000000) == 0x39000000;
    }

    // Decode a MOVZ/MOVN/MOVK (move wide immediate). Returns true iff the
    // instruction matches that encoding class.
    static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
    {
        int insn = *static_cast<int*>(address);
        sf = static_cast<Datasize>((insn >> 31) & 1);
        opc = static_cast<MoveWideOp>((insn >> 29) & 3);
        hw = (insn >> 21) & 3; // Halfword index (shift / 16).
        imm16 = insn >> 5;     // uint16_t assignment keeps only bits 5-20.
        rd = disassembleXOrZr(insn & 0x1f);
        return (insn & 0x1f800000) == 0x12800000;
    }
3087 | ||
3088 | static bool disassembleNop(void* address) | |
3089 | { | |
3090 | unsigned int insn = *static_cast<unsigned int*>(address); | |
3091 | return insn == 0xd503201f; | |
3092 | } | |
3093 | ||
    // Decode a CBZ/CBNZ. 'op' is true for CBNZ; imm19 is the sign-extended
    // branch offset field. Returns true iff the encoding matches.
    static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
    {
        int insn = *static_cast<int*>(address);
        sf = static_cast<Datasize>((insn >> 31) & 1);
        op = (insn >> 24) & 0x1;
        imm19 = (insn << 8) >> 13; // Arithmetic shift sign-extends bits 5-23.
        rt = static_cast<RegisterID>(insn & 0x1f);
        return (insn & 0x7e000000) == 0x34000000;

    }

    // Decode a B.cond. op01 gathers the two unallocated bits (24 and 4),
    // which callers assert to be zero. Returns true iff the encoding matches.
    static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition)
    {
        int insn = *static_cast<int*>(address);
        op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
        imm19 = (insn << 8) >> 13; // Arithmetic shift sign-extends bits 5-23.
        condition = static_cast<Condition>(insn & 0xf);
        return (insn & 0xfe000000) == 0x54000000;
    }
3113 | ||
3114 | static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt) | |
3115 | { | |
3116 | int insn = *static_cast<int*>(address); | |
3117 | op = (insn >> 24) & 0x1; | |
3118 | imm14 = (insn << 13) >> 18; | |
3119 | bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn > 19) & 0x1f)); | |
3120 | rt = static_cast<RegisterID>(insn & 0x1f); | |
3121 | return (insn & 0x7e000000) == 0x36000000; | |
3122 | ||
3123 | } | |
3124 | ||
3125 | static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26) | |
3126 | { | |
3127 | int insn = *static_cast<int*>(address); | |
3128 | op = (insn >> 31) & 1; | |
3129 | imm26 = (insn << 6) >> 6; | |
3130 | return (insn & 0x7c000000) == 0x14000000; | |
3131 | } | |
3132 | ||
    // Register-number helpers: number 31 encodes either SP or ZR depending on
    // instruction context; the asserts catch passing the wrong one.
    static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; }
    static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; }
    static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
    static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }

    // Append one 32-bit instruction word to the buffer.
    ALWAYS_INLINE void insn(int instruction)
    {
        m_buffer.putInt(instruction);
    }

    // --- Instruction encoders: each returns the assembled 32-bit word. ---

    ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
    {
        ASSERT(imm3 < 5); // Extend shift amount is limited to 0-4.
        // The only allocated values for opt is 0.
        const int opt = 0;
        return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
    }

    ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
    {
        ASSERT(shift < 2); // Shift is 0 or 12.
        ASSERT(isUInt12(imm12));
        return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
    }

    ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
    {
        ASSERT(shift < 3);
        ASSERT(!(imm6 & (sf ? ~63 : ~31))); // Shift amount bounded by datasize.
        return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
    {
        const int opcode2 = 0;
        return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
    {
        ASSERT(immr < (sf ? 64 : 32));
        ASSERT(imms < (sf ? 64 : 32));
        const int N = sf; // N must match sf for valid bitfield encodings.
        return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    // 'op' means negate
    ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
    {
        ASSERT(imm19 == (imm19 << 13) >> 13); // Must fit in signed 19 bits.
        return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
    }

    ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
    {
        ASSERT(imm19 == (imm19 << 13) >> 13); // Must fit in signed 19 bits.
        ASSERT(!(cond & ~15));
        // The only allocated values for o1 & o0 are 0.
        const int o1 = 0;
        const int o0 = 0;
        return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
    }
3195 | ||
    // Conditional compare (CCMN/CCMP) with a 5-bit immediate operand.
    ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
    {
        ASSERT(!(imm5 & ~0x1f));
        ASSERT(nzcv < 16); // Flags written when the condition fails.
        const int S = 1;
        const int o2 = 0;
        const int o3 = 0;
        return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
    }

    // Conditional compare (CCMN/CCMP) with a register operand.
    ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
    {
        ASSERT(nzcv < 16);
        const int S = 1;
        const int o2 = 0;
        const int o3 = 0;
        return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
    }

    // 'op' means negate
    // 'op2' means increment
    ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        const int opcode2 = 0;
        return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
    {
        const int S = 0;
        return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
    {
        // The combined opcode enum splits across three instruction fields.
        int op54 = opcode >> 4;
        int op31 = (opcode >> 1) & 7;
        int op0 = opcode & 1;
        return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }

    // Exception generation (BRK, HLT, SVC, ...). LL selects the call level;
    // breakpoint/halt require LL == 0.
    ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
    {
        ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
        const int op2 = 0;
        return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
    }

    // EXTR: extract a register pair's contiguous bits starting at 'imms'.
    ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
    {
        ASSERT(imms < (sf ? 64 : 32));
        const int op21 = 0;
        const int N = sf;
        const int o0 = 0;
        return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
    }
3259 | ||
3260 | ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2) | |
3261 | { | |
3262 | const int M = 0; | |
3263 | const int S = 0; | |
3264 | const int op = 0; | |
3265 | return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2); | |
3266 | } | |
3267 | ||
3268 | ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv) | |
3269 | { | |
3270 | ASSERT(nzcv < 16); | |
3271 | const int M = 0; | |
3272 | const int S = 0; | |
3273 | return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv); | |
3274 | } | |
3275 | ||
3276 | ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd) | |
3277 | { | |
3278 | const int M = 0; | |
3279 | const int S = 0; | |
3280 | return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd); | |
3281 | } | |
3282 | ||
// Encodes a floating-point move-immediate instruction (base opcode
// 0x1e201000). imm8 is the packed 8-bit FP immediate (masked to 8 bits
// here); M, S and imm5 are fixed to zero.
ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    const int imm5 = 0;
    return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
}
3290 | ||
// Encodes a floating-point <-> integer conversion/move instruction (base
// opcode 0x1e200000). rmodeOpcode carries the combined rmode+opcode bits;
// S is fixed to zero. This FPR/FPR overload is the canonical encoder; the
// RegisterID overloads below reinterpret GP registers and delegate here.
ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
{
    const int S = 0;
    return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
}
3296 | ||
// GP-destination overload: reinterprets rd (zr allowed) as an FPR field and
// delegates to the FPR/FPR encoder above.
ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
{
    return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
}
3301 | ||
// GP-source overload: reinterprets rn (zr allowed) as an FPR field and
// delegates to the FPR/FPR encoder above.
ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
{
    return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
}
3306 | ||
// Encodes a one-source floating-point data-processing instruction (base
// opcode 0x1e204000): rd = op(rn). M and S are fixed to zero.
ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
}
3313 | ||
// Encodes a two-source floating-point data-processing instruction (base
// opcode 0x1e200800): rd = op(rn, rm). M and S are fixed to zero.
ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
}
3320 | ||
// Encodes a three-source floating-point data-processing instruction (base
// opcode 0x1f000000), i.e. fused multiply-add style ops using rn, rm and the
// accumulator ra. 'o1' means negate; 'o2' is the add/sub selector. M and S
// are fixed to zero.
ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd);
}
3328 | ||
// Encodes a PC-relative load-literal instruction (base opcode 0x18000000).
// 'V' means vector (FP/SIMD destination). imm19 is the signed 19-bit word
// offset; the assert checks it round-trips through sign extension, i.e. fits
// in 19 bits.
ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, FPRegisterID rt)
{
    ASSERT(((imm19 << 13) >> 13) == imm19);
    return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt);
}
3335 | ||
// GP-destination overload: reinterprets rt (zr allowed) as an FPR field and
// delegates to the FPRegisterID encoder above.
ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt)
{
    return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt));
}
3340 | ||
// Encodes a post-indexed load/store (base opcode 0x38000400): access at [rn],
// then rn += imm9. 'V' means vector (FP/SIMD register); imm9 is a signed
// 9-bit byte offset.
ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(isInt9(imm9));
    return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
}
3349 | ||
// GP-register overload: reinterprets rt (zr allowed) as an FPR field and
// delegates to the FPRegisterID encoder above, which validates imm9.
ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
{
    return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
}
3354 | ||
// Encodes a pre-indexed load/store (base opcode 0x38000c00): rn += imm9,
// then access at [rn]. 'V' means vector (FP/SIMD register); imm9 is a signed
// 9-bit byte offset.
ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(isInt9(imm9));
    return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
}
3363 | ||
// GP-register overload: reinterprets rt (zr allowed) as an FPR field and
// delegates to the FPRegisterID encoder above, which validates imm9.
ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
{
    return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
}
3368 | ||
// Encodes a register-offset load/store (base opcode 0x38200800): access at
// [rn + extend(rm) << (S ? scale : 0)].
// 'V' means vector (FP/SIMD register); 'S' means shift (scale) rm.
ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit.
    return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt);
}
3378 | ||
// GP-register overload: reinterprets rt (zr allowed) as an FPR field and
// delegates to the FPRegisterID encoder above.
ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt)
{
    return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt));
}
3383 | ||
// Encodes an unscaled-immediate load/store (base opcode 0x38000000): access
// at [rn + imm9] with no writeback. 'V' means vector (FP/SIMD register);
// imm9 is a signed 9-bit byte offset.
ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(isInt9(imm9));
    return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
}
3392 | ||
3393 | ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt) | |
3394 | { | |
3395 | ASSERT(isInt9(imm9)); | |
3396 | return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt)); | |
3397 | } | |
3398 | ||
// Encodes a scaled unsigned-immediate load/store (base opcode 0x39000000):
// access at [rn + imm12 * accessSize]. 'V' means vector (FP/SIMD register);
// imm12 is an unsigned 12-bit scaled offset.
ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt)
{
    ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
    ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
    ASSERT(isUInt12(imm12));
    return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt);
}
3407 | ||
// GP-register overload: reinterprets rt (zr allowed) as an FPR field and
// delegates to the FPRegisterID encoder above, which validates imm12.
ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt)
{
    return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt));
}
3412 | ||
// Encodes a logical-immediate instruction (base opcode 0x12000000).
// N_immr_imms is the pre-packed N:immr:imms bitmask-immediate field (13 bits
// for 64-bit ops, 12 for 32-bit — checked by the assert). The destination
// uses SP encoding except for ANDS, which writes zr instead.
ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd)
{
    ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff)));
    return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd));
}
3418 | ||
// Encodes a logical shifted-register instruction (base opcode 0x0a000000):
// rd = rn op shift(rm, imm6). 'N' means negate rm. imm6 is the shift amount
// and must be a valid bit count for the operand width.
ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
{
    ASSERT(!(imm6 & (sf ? ~63 : ~31)));
    return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}
3425 | ||
// Encodes a move-wide-immediate instruction (base opcode 0x12800000): places
// imm16 at halfword slot 'hw' (0-1 for 32-bit, 0-3 for 64-bit ops).
// NOTE(review): "Imediate" is a long-standing misspelling kept for source
// compatibility with existing callers.
ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd)
{
    ASSERT(hw < (sf ? 4 : 2));
    return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd));
}
3431 | ||
// Encodes an unconditional PC-relative branch (base opcode 0x14000000).
// 'op' means link (save return address). imm26 is the signed 26-bit word
// offset; the assert checks it survives a sign-extension round trip, i.e.
// fits in 26 bits.
ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26)
{
    ASSERT(imm26 == (imm26 << 6) >> 6);
    return (0x14000000 | op << 31 | (imm26 & 0x3ffffff));
}
3438 | ||
// Encodes a PC-relative address calculation (base opcode 0x10000000).
// 'op' means page (page-granular form). The signed 21-bit immediate is split
// into its low 2 bits (immlo, bits 30:29 of the instruction) and high
// 19 bits (immhi); the assert checks imm21 fits in 21 signed bits.
ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd)
{
    ASSERT(imm21 == (imm21 << 11) >> 11);
    int32_t immlo = imm21 & 3;
    int32_t immhi = (imm21 >> 2) & 0x7ffff;
    return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd));
}
3447 | ||
// Encodes a system instruction (base opcode 0xd5000000) from its raw
// L/op0/op1/CRn/CRm/op2 fields plus the transfer register rt (zr allowed).
// Used below as the primitive for hint-class pseudo-instructions.
ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt)
{
    return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt));
}
3452 | ||
// Encodes a HINT instruction with the given 7-bit hint number: the top four
// bits go in CRm and the bottom three in op2 of a system instruction with
// op0=0, op1=3, CRn=2 and rt=zr.
ALWAYS_INLINE static int hintPseudo(int imm)
{
    ASSERT(!(imm & ~0x7f)); // Hint number must fit in 7 bits (CRm:op2).
    return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr);
}
3458 | ||
// Encodes a NOP: hint number 0.
ALWAYS_INLINE static int nopPseudo()
{
    return hintPseudo(0);
}
3463 | ||
// Encodes a test-bit-and-branch instruction (base opcode 0x36000000).
// 'op' means negate (branch if the bit is set rather than clear). b50 is the
// 6-bit index of the tested bit, split into its top bit (b5, instruction
// bit 31) and low five bits (b40); imm14 is the signed 14-bit word offset
// (the assert checks it fits in 14 bits).
ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt)
{
    ASSERT(!(b50 & ~0x3f));
    ASSERT(imm14 == (imm14 << 18) >> 18);
    int b5 = b50 >> 5;
    int b40 = b50 & 0x1f;
    return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt));
}
3473 | ||
3474 | ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn) | |
3475 | { | |
3476 | // The only allocated values for op2 is 0x1f, for op3 & op4 are 0. | |
3477 | const int op2 = 0x1f; | |
3478 | const int op3 = 0; | |
3479 | const int op4 = 0; | |
3480 | return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4); | |
3481 | } | |
3482 | ||
3483 | AssemblerBuffer m_buffer; | |
3484 | Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink; | |
3485 | int m_indexOfLastWatchpoint; | |
3486 | int m_indexOfTailOfLastWatchpoint; | |
3487 | }; | |
3488 | ||
3489 | } // namespace JSC | |
3490 | ||
3491 | #undef CHECK_DATASIZE_OF | |
3492 | #undef DATASIZE_OF | |
3493 | #undef MEMOPSIZE_OF | |
3494 | #undef CHECK_DATASIZE | |
3495 | #undef DATASIZE | |
3496 | #undef MEMOPSIZE | |
3497 | #undef CHECK_FP_MEMOP_DATASIZE | |
3498 | ||
3499 | #endif // ENABLE(ASSEMBLER) && CPU(ARM64) | |
3500 | ||
3501 | #endif // ARMAssembler_h |