/*
 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM64_h
#define MacroAssemblerARM64_h

#if ENABLE(ASSEMBLER)

#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/MathExtras.h>

namespace JSC {

class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    static const intptr_t maskHalfWord0 = 0xffffl;
    static const intptr_t maskHalfWord1 = 0xffff0000l;
    static const intptr_t maskUpperWord = 0xffffffff00000000l;

    // 4 instructions - 3 to load the function pointer, + blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
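    // Illustrative call-site layout implied by the offset above (a sketch;
    // the exact register and move sequence are assumptions, not shown in this file):
    //     movz x16, #ptr0            ; call end - 16  <- patch point
    //     movk x16, #ptr1, lsl #16   ; call end - 12
    //     movk x16, #ptr2, lsl #32   ; call end - 8
    //     blr  x16                   ; call end - 4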

public:
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }

    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); }

    static const Scale ScalePtr = TimesEight;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
        return !(value & ~0x3ff8);
    }
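    // Worked against the mask above (the offset must be positive, 8-byte
    // aligned, and no larger than 0x3ff8):
    //     isCompactPtrAlignedAddressOffset(0x3ff8)  -> true  (largest allowed)
    //     isCompactPtrAlignedAddressOffset(0x0ffc)  -> false (not 8-byte aligned)
    //     isCompactPtrAlignedAddressOffset(0x4000)  -> false (out of range)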

    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
    };
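    // On ARM64, fcmp reports an unordered comparison through the V flag, and no
    // single condition code means "not equal and ordered" or "equal or unordered".
    // The two conditions flagged above are therefore synthesized from a pair of
    // branches in branchDouble() below; callers can still use them as usual, e.g.
    // (a sketch, assuming a hypothetical MacroAssemblerARM64 instance 'jit'):
    //     Jump j = jit.branchDouble(DoubleNotEqual, fpr1, fpr2);
    //     // not taken when fpr1 == fpr2, nor when either operand is NaN.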

    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID framePointerRegister = ARM64Registers::fp;
    static const RegisterID linkRegister = ARM64Registers::lr;

    // FIXME: Get reasonable implementations for these
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }

    // Integer operations:

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }
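    // The three paths above, sketched with hypothetical registers:
    //     jit.add32(TrustedImm32(42), x0, x1);       // fits UInt12: add w1, w0, #42
    //     jit.add32(TrustedImm32(-42), x0, x1);      // negation fits: sub w1, w0, #42
    //     jit.add32(TrustedImm32(0x12345), x0, x1);  // neither fits: materialize into
    //                                                // dataTempRegister, then add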

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        if (src == ARM64Registers::sp)
            m_assembler.add<64>(dest, src, dest);
        else
            m_assembler.add<64>(dest, dest, src);
    }

    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }
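    // The fast path above relies on the ARM64 "logical immediate" encoding,
    // which can express only repeating patterns of a rotated contiguous bit
    // run. For example, 0xff and 0xff00ff00 are encodable and take a single
    // and instruction, while a value like 0x12345 is not and falls back to
    // materializing the mask in dataTempRegister.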

    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void and64(TrustedImmPtr imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, shiftAmount);
    }

    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
    }

    void lshift64(RegisterID shiftAmount, RegisterID dest)
    {
        lshift64(dest, shiftAmount, dest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        lshift64(dest, imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<64>(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, shiftAmount);
    }

    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
    }

    void rshift64(RegisterID shiftAmount, RegisterID dest)
    {
        rshift64(dest, shiftAmount, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        rshift64(dest, imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, shiftAmount);
    }
    void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        // Mask to 6 bits for a 64-bit shift, matching lshift64/rshift64.
        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
    }

    void urshift64(RegisterID shiftAmount, RegisterID dest)
    {
        urshift64(dest, shiftAmount, dest);
    }

    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        urshift64(dest, imm, dest);
    }
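    // Note the distinction between the right-shift families above: rshift32/
    // rshift64 use asr (arithmetic, sign-propagating) while urshift32/urshift64
    // use lsr (logical, zero-filling). Immediate shift amounts are masked to
    // the operand width (0x1f for 32-bit, 0x3f for 64-bit), matching what the
    // register forms do in hardware.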

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<32>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

            if (logicalImm.isValid()) {
                m_assembler.eor<32>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<32>(dest, src, dataTempRegister);
        }
    }

    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }

    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

            if (logicalImm.isValid()) {
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }


    // Memory access operations:

    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
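    // A BaseIndex access computes base + (index << scale) + offset. The fast
    // path above applies when there is no offset and the scale is either 0 or
    // matches the transfer size (3, i.e. index * 8, for a 64-bit load), since
    // the ldr can then scale the index itself; otherwise the index/offset
    // arithmetic is done in memoryTempRegister first.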

    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }

    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), memoryTempRegister);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            m_cachedMemoryTempRegister.invalidate();
    }

    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    void store8(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<8>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<8>(src, address.base, memoryTempRegister);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    void store8(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to a double & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // Test for negative zero.
        if (negZeroCheck) {
            Jump valueIsNonZero = branchTest32(NonZero, dest);
            RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
            m_assembler.fmov<64>(scratch, src);
            failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
            valueIsNonZero.link(this);
        }
    }
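    // Typical use (a sketch; 'jit', 'srcFPR', 'destGPR' and 'scratchFPR' are
    // hypothetical):
    //     JumpList failureCases;
    //     jit.branchConvertDoubleToInt32(srcFPR, destGPR, failureCases, scratchFPR);
    //     // fast path: destGPR now holds the exact int32 value of srcFPR
    //     failureCases.link(&jit);
    //     // slow path: the value was fractional, out of int32 range, NaN, or -0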

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump result = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32 bits to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check that the low 32 bits sign-extend to the full 64-bit value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }
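    // The cmp<64> with SXTW above compares the 64-bit truncation against its
    // own sign-extended low word. Worked example: for 2^32 (0x100000000) the
    // low word is 0, which sign-extends to 0 != 0x100000000, so the failure
    // case is taken; for -1 the low word 0xffffffff sign-extends back to -1,
    // so the truncation is deemed successful.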

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 64>(dest, src);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        moveToCachedReg(address, m_cachedMemoryTempRegister);
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        moveToCachedReg(address, m_cachedMemoryTempRegister);
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // Result is undefined if the value is outside of the integer range.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }

    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }


    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine-word-sized units of data. Push and pop
    // operations add and remove a single register-sized unit of data
    // to or from the stack. These operations are not supported on
    // ARM64. Peek and poke operations read or write values on the
    // stack, without moving the current stack position. Additionally,
    // there are popToRestore and pushToSave operations, which are
    // designed just for quick-and-dirty saving and restoring of
    // temporary values. These operations don't claim to have any
    // ABI compatibility.
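    //
    // A minimal sketch of the save/restore idiom (hypothetical 'jit' and
    // registers; every operation moves sp by a full 16 bytes, keeping the
    // 16-byte stack alignment ARM64 requires):
    //     jit.pushPair(x0, x1);
    //     jit.pushToSave(x2);
    //     // ...
    //     jit.popToRestore(x2);
    //     jit.popPair(x0, x1);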

    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
    }

    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
    }

    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }

    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }

    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        RegisterID reg = dataTempRegister;
        pushPair(reg, reg);
        move(imm, reg);
        store64(reg, stackPointerRegister);
        load64(Address(stackPointerRegister, 8), reg);
    }
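    // Walking through the sequence above: pushPair claims a 16-byte slot with
    // reg's old value in both halves, move then clobbers reg with the
    // immediate, store64 writes the immediate over the low half at [sp], and
    // the final load restores reg from the untouched copy at [sp + 8], leaving
    // the immediate on the stack without disturbing any register.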

    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }

    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return 16; }

    // Register move operations:

    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }

    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations returns a Jump
    // object which may be linked at a later point, allowing forward jumps
    // and jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1,
    // when treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

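    // A minimal sketch of the forward-branch pattern (hypothetical 'jit' and
    // register x0):
    //     Jump notEqual = jit.branch32(NotEqual, x0, TrustedImm32(5));
    //     // ... code run only when x0 == 5 ...
    //     notEqual.link(&jit); // resolve the forward jump here
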
1622 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1623 {
1624 m_assembler.cmp<32>(left, right);
1625 return Jump(makeBranch(cond));
1626 }
1627
1628 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1629 {
1630 if (isUInt12(right.m_value))
1631 m_assembler.cmp<32>(left, UInt12(right.m_value));
1632 else if (isUInt12(-right.m_value))
1633 m_assembler.cmn<32>(left, UInt12(-right.m_value));
1634 else {
1635 moveToCachedReg(right, m_dataMemoryTempRegister);
1636 m_assembler.cmp<32>(left, dataTempRegister);
1637 }
1638 return Jump(makeBranch(cond));
1639 }
1640
1641 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1642 {
1643 load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
1644 return branch32(cond, left, memoryTempRegister);
1645 }
1646
1647 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1648 {
1649 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
1650 return branch32(cond, memoryTempRegister, right);
1651 }
1652
1653 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1654 {
1655 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
1656 return branch32(cond, memoryTempRegister, right);
1657 }
1658
1659 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1660 {
1661 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
1662 return branch32(cond, memoryTempRegister, right);
1663 }
1664
1665 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1666 {
1667 load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1668 return branch32(cond, dataTempRegister, right);
1669 }
1670
1671 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1672 {
1673 load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
1674 return branch32(cond, memoryTempRegister, right);
1675 }
1676
1677 Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
1678 {
1679 if (right == ARM64Registers::sp) {
1680 if (cond == Equal && left != ARM64Registers::sp) {
1681 // CMP can only use SP for the left argument, since we are testing for equality, the order
1682 // does not matter here.
1683 std::swap(left, right);
1684 } else {
1685 move(right, getCachedDataTempRegisterIDAndInvalidate());
1686 right = dataTempRegister;
1687 }
1688 }
1689 m_assembler.cmp<64>(left, right);
1690 return Jump(makeBranch(cond));
1691 }
1692
1693 Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
1694 {
1695 intptr_t immediate = right.m_value;
1696 if (isUInt12(immediate))
1697 m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
1698 else if (isUInt12(-immediate))
1699 m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
1700 else {
1701 moveToCachedReg(right, m_dataMemoryTempRegister);
1702 m_assembler.cmp<64>(left, dataTempRegister);
1703 }
1704 return Jump(makeBranch(cond));
1705 }
1706
1707 Jump branch64(RelationalCondition cond, RegisterID left, Address right)
1708 {
1709 load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
1710 return branch64(cond, left, memoryTempRegister);
1711 }
1712
1713 Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1714 {
1715 load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1716 return branch64(cond, dataTempRegister, right);
1717 }
1718
1719 Jump branch64(RelationalCondition cond, Address left, RegisterID right)
1720 {
1721 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
1722 return branch64(cond, memoryTempRegister, right);
1723 }
1724
1725 Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
1726 {
1727 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
1728 return branch64(cond, memoryTempRegister, right);
1729 }
1730
1731 Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
1732 {
1733 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
1734 return branch64(cond, memoryTempRegister, right);
1735 }
1736
1737 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
1738 {
1739 ASSERT(!(0xffffff00 & right.m_value));
1740 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
1741 return branch32(cond, memoryTempRegister, right);
1742 }
1743
1744 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1745 {
1746 ASSERT(!(0xffffff00 & right.m_value));
1747 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
1748 return branch32(cond, memoryTempRegister, right);
1749 }
1750
1751 Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1752 {
1753 ASSERT(!(0xffffff00 & right.m_value));
1754 load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
1755 return branch32(cond, memoryTempRegister, right);
1756 }
1757
1758 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
1759 {
1760 m_assembler.tst<32>(reg, mask);
1761 return Jump(makeBranch(cond));
1762 }
1763
1764 void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1765 {
1766 if (mask.m_value == -1)
1767 m_assembler.tst<32>(reg, reg);
1768 else {
1769 bool testedWithImmediate = false;
1770 if ((cond == Zero) || (cond == NonZero)) {
1771 LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
1772
1773 if (logicalImm.isValid()) {
1774 m_assembler.tst<32>(reg, logicalImm);
1775 testedWithImmediate = true;
1776 }
1777 }
1778 if (!testedWithImmediate) {
1779 move(mask, getCachedDataTempRegisterIDAndInvalidate());
1780 m_assembler.tst<32>(reg, dataTempRegister);
1781 }
1782 }
1783 }
1784
1785 Jump branch(ResultCondition cond)
1786 {
1787 return Jump(makeBranch(cond));
1788 }
1789
1790 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1791 {
1792 if (mask.m_value == -1) {
1793 if ((cond == Zero) || (cond == NonZero))
1794 return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
1795 m_assembler.tst<32>(reg, reg);
1796 } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
1797 return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
1798 else {
1799 if ((cond == Zero) || (cond == NonZero)) {
1800 LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
1801
1802 if (logicalImm.isValid()) {
1803 m_assembler.tst<32>(reg, logicalImm);
1804 return Jump(makeBranch(cond));
1805 }
1806 }
1807
1808 move(mask, getCachedDataTempRegisterIDAndInvalidate());
1809 m_assembler.tst<32>(reg, dataTempRegister);
1810 }
1811 return Jump(makeBranch(cond));
1812 }
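// A sketch of the encodings selected above (illustrative operands):
//   branchTest32(Zero, w0)                      -> cbz w0, <target>
//   branchTest32(NonZero, w0, TrustedImm32(4))  -> tbnz w0, #2, <target>
//   branchTest32(Zero, w0, TrustedImm32(0xff))  -> tst w0, #0xff; b.eq <target>
// Masks that are neither -1, a single bit, nor a valid logical immediate are
// first moved into dataTempRegister.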
1813
1814 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1815 {
1816 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
1817 return branchTest32(cond, memoryTempRegister, mask);
1818 }
1819
1820 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1821 {
1822 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
1823 return branchTest32(cond, memoryTempRegister, mask);
1824 }
1825
1826 Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
1827 {
1828 m_assembler.tst<64>(reg, mask);
1829 return Jump(makeBranch(cond));
1830 }
1831
1832 Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1833 {
1834 if (mask.m_value == -1) {
1835 if ((cond == Zero) || (cond == NonZero))
1836 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
1837 m_assembler.tst<64>(reg, reg);
1838 } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
1839 return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
1840 else {
1841 if ((cond == Zero) || (cond == NonZero)) {
1842 LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
1843
1844 if (logicalImm.isValid()) {
1845 m_assembler.tst<64>(reg, logicalImm);
1846 return Jump(makeBranch(cond));
1847 }
1848 }
1849
1850 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
1851 m_assembler.tst<64>(reg, dataTempRegister);
1852 }
1853 return Jump(makeBranch(cond));
1854 }
1855
1856 Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
1857 {
1858 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1859 return branchTest64(cond, dataTempRegister, mask);
1860 }
1861
1862 Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1863 {
1864 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1865 return branchTest64(cond, dataTempRegister, mask);
1866 }
1867
1868 Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1869 {
1870 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1871 return branchTest64(cond, dataTempRegister, mask);
1872 }
1873
1874 Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1875 {
1876 load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1877 return branchTest64(cond, dataTempRegister, mask);
1878 }
1879
1880 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1881 {
1882 load8(address, getCachedDataTempRegisterIDAndInvalidate());
1883 return branchTest32(cond, dataTempRegister, mask);
1884 }
1885
1886 Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1887 {
1888 load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1889 return branchTest32(cond, dataTempRegister, mask);
1890 }
1891
1892 Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
1893 {
1894 move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
1895 m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
1896 return branchTest32(cond, dataTempRegister, mask);
1897 }
1898
1899 Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1900 {
1901 load8(address, getCachedDataTempRegisterIDAndInvalidate());
1902 return branchTest32(cond, dataTempRegister, mask);
1903 }
1904
1905 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1906 {
1907 return branch32(cond, left, right);
1908 }
1909
1910
1911 // Arithmetic control flow operations:
1912 //
1913 // These conditional branch operations branch based
1914 // on the result of an arithmetic operation. The operation
1915 // is performed as normal, storing the result.
1916 //
1917 // * jz operations branch if the result is zero.
1918 // * jo operations branch if the (signed) arithmetic
1919 // operation caused an overflow to occur.
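//
// For example (a sketch; masm and counterReg are illustrative names):
//
// Jump overflowed = masm.branchAdd32(Overflow, TrustedImm32(1), counterReg);
// // ... fast path continues here ...
// overflowed.link(&masm); // taken only if the add set the overflow flag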
1920
1921 Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1922 {
1923 m_assembler.add<32, S>(dest, op1, op2);
1924 return Jump(makeBranch(cond));
1925 }
1926
1927 Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1928 {
1929 if (isUInt12(imm.m_value)) {
1930 m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
1931 return Jump(makeBranch(cond));
1932 }
1933 if (isUInt12(-imm.m_value)) {
1934 m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
1935 return Jump(makeBranch(cond));
1936 }
1937
1938 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
1939 return branchAdd32(cond, op1, dataTempRegister, dest);
1940 }
1941
1942 Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
1943 {
1944 load32(src, getCachedDataTempRegisterIDAndInvalidate());
1945 return branchAdd32(cond, dest, dataTempRegister, dest);
1946 }
1947
1948 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1949 {
1950 return branchAdd32(cond, dest, src, dest);
1951 }
1952
1953 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1954 {
1955 return branchAdd32(cond, dest, imm, dest);
1956 }
1957
1958 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
1959 {
1960 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1961
1962 if (isUInt12(imm.m_value)) {
1963 m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
1964 store32(dataTempRegister, address.m_ptr);
1965 } else if (isUInt12(-imm.m_value)) {
1966 m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
1967 store32(dataTempRegister, address.m_ptr);
1968 } else {
1969 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
1970 m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
1971 store32(dataTempRegister, address.m_ptr);
1972 }
1973
1974 return Jump(makeBranch(cond));
1975 }
1976
1977 Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1978 {
1979 m_assembler.add<64, S>(dest, op1, op2);
1980 return Jump(makeBranch(cond));
1981 }
1982
1983 Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1984 {
1985 if (isUInt12(imm.m_value)) {
1986 m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
1987 return Jump(makeBranch(cond));
1988 }
1989 if (isUInt12(-imm.m_value)) {
1990 m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
1991 return Jump(makeBranch(cond));
1992 }
1993
1994 move(imm, getCachedDataTempRegisterIDAndInvalidate());
1995 return branchAdd64(cond, op1, dataTempRegister, dest);
1996 }
1997
1998 Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
1999 {
2000 return branchAdd64(cond, dest, src, dest);
2001 }
2002
2003 Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2004 {
2005 return branchAdd64(cond, dest, imm, dest);
2006 }
2007
2008 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2009 {
2010 ASSERT(cond != Signed);
2011
2012 if (cond != Overflow) {
2013 m_assembler.mul<32>(dest, src1, src2);
2014 return branchTest32(cond, dest);
2015 }
2016
2017 // This is a signed multiply of two 32-bit values, producing a 64-bit result.
2018 m_assembler.smull(dest, src1, src2);
2019 // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
2020 m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
2021 // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
2022 m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
2023 // After a mul32 the top 32 bits of the register should be clear.
2024 zeroExtend32ToPtr(dest, dest);
2025 // Check that bits 31..63 of the original result were all equal.
2026 return branch32(NotEqual, memoryTempRegister, dataTempRegister);
2027 }
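// For the Overflow case, the product exceeds 32 bits exactly when bits 63..32
// of the smull result differ from the sign-splat of bit 31, i.e. when
// (result >> 32) != (int32_t(result) >> 31); the two asr instructions above
// compute those quantities into the temp registers before the final compare.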
2028
2029 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
2030 {
2031 return branchMul32(cond, dest, src, dest);
2032 }
2033
2034 Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
2035 {
2036 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2037 return branchMul32(cond, dataTempRegister, src, dest);
2038 }
2039
2040 Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2041 {
2042 ASSERT(cond != Signed);
2043
2044 // This is a signed multiply of two 64-bit values, producing a 64-bit result.
2045 m_assembler.mul<64>(dest, src1, src2);
2046
2047 if (cond != Overflow)
2048 return branchTest64(cond, dest);
2049
2050 // Compute bits 127..64 of the result into dataTempRegister.
2051 m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2);
2052 // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
2053 m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63);
2054 // Check that bits 63..127 of the original result were all equal.
2055 return branch64(NotEqual, memoryTempRegister, dataTempRegister);
2056 }
2057
2058 Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
2059 {
2060 return branchMul64(cond, dest, src, dest);
2061 }
2062
2063 Jump branchNeg32(ResultCondition cond, RegisterID dest)
2064 {
2065 m_assembler.neg<32, S>(dest, dest);
2066 return Jump(makeBranch(cond));
2067 }
2068
2069 Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
2070 {
2071 m_assembler.neg<64, S>(srcDest, srcDest);
2072 return Jump(makeBranch(cond));
2073 }
2074
2075 Jump branchSub32(ResultCondition cond, RegisterID dest)
2076 {
2077 m_assembler.neg<32, S>(dest, dest);
2078 return Jump(makeBranch(cond));
2079 }
2080
2081 Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2082 {
2083 m_assembler.sub<32, S>(dest, op1, op2);
2084 return Jump(makeBranch(cond));
2085 }
2086
2087 Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2088 {
2089 if (isUInt12(imm.m_value)) {
2090 m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
2091 return Jump(makeBranch(cond));
2092 }
2093 if (isUInt12(-imm.m_value)) {
2094 m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
2095 return Jump(makeBranch(cond));
2096 }
2097
2098 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
2099 return branchSub32(cond, op1, dataTempRegister, dest);
2100 }
2101
2102 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
2103 {
2104 return branchSub32(cond, dest, src, dest);
2105 }
2106
2107 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2108 {
2109 return branchSub32(cond, dest, imm, dest);
2110 }
2111
2112 Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2113 {
2114 m_assembler.sub<64, S>(dest, op1, op2);
2115 return Jump(makeBranch(cond));
2116 }
2117
2118 Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2119 {
2120 if (isUInt12(imm.m_value)) {
2121 m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
2122 return Jump(makeBranch(cond));
2123 }
2124 if (isUInt12(-imm.m_value)) {
2125 m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
2126 return Jump(makeBranch(cond));
2127 }
2128
2129 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2130 return branchSub64(cond, op1, dataTempRegister, dest);
2131 }
2132
2133 Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
2134 {
2135 return branchSub64(cond, dest, src, dest);
2136 }
2137
2138 Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2139 {
2140 return branchSub64(cond, dest, imm, dest);
2141 }
2142
2143
2144 // Jumps, calls, returns
2145
2146 ALWAYS_INLINE Call call()
2147 {
2148 AssemblerLabel pointerLabel = m_assembler.label();
2149 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2150 invalidateAllTempRegisters();
2151 m_assembler.blr(dataTempRegister);
2152 AssemblerLabel callLabel = m_assembler.label();
2153 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2154 return Call(callLabel, Call::Linkable);
2155 }
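// The fixed-width move above always occupies three instructions, so together
// with the blr every call site has a constant layout; linkCall() and
// repatchCall() below rely on this, rewriting the pointer at the fixed
// REPATCH_OFFSET_CALL_TO_POINTER offset from the label returned here.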
2156
2157 ALWAYS_INLINE Call call(RegisterID target)
2158 {
2159 invalidateAllTempRegisters();
2160 m_assembler.blr(target);
2161 return Call(m_assembler.label(), Call::None);
2162 }
2163
2164 ALWAYS_INLINE Call call(Address address)
2165 {
2166 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2167 return call(dataTempRegister);
2168 }
2169
2170 ALWAYS_INLINE Jump jump()
2171 {
2172 AssemblerLabel label = m_assembler.label();
2173 m_assembler.b();
2174 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
2175 }
2176
2177 void jump(RegisterID target)
2178 {
2179 m_assembler.br(target);
2180 }
2181
2182 void jump(Address address)
2183 {
2184 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2185 m_assembler.br(dataTempRegister);
2186 }
2187
2188 void jump(AbsoluteAddress address)
2189 {
2190 move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
2191 load64(Address(dataTempRegister), dataTempRegister);
2192 m_assembler.br(dataTempRegister);
2193 }
2194
2195 ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
2196 {
2197 oldJump.link(this);
2198 return tailRecursiveCall();
2199 }
2200
2201 ALWAYS_INLINE Call nearCall()
2202 {
2203 m_assembler.bl();
2204 return Call(m_assembler.label(), Call::LinkableNear);
2205 }
2206
2207 ALWAYS_INLINE void ret()
2208 {
2209 m_assembler.ret();
2210 }
2211
2212 ALWAYS_INLINE Call tailRecursiveCall()
2213 {
2214 // Like a normal call, but don't link.
2215 AssemblerLabel pointerLabel = m_assembler.label();
2216 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2217 m_assembler.br(dataTempRegister);
2218 AssemblerLabel callLabel = m_assembler.label();
2219 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2220 return Call(callLabel, Call::Linkable);
2221 }
2222
2223
2224 // Comparison operations
2225
2226 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2227 {
2228 m_assembler.cmp<32>(left, right);
2229 m_assembler.cset<32>(dest, ARM64Condition(cond));
2230 }
2231
2232 void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
2233 {
2234 load32(left, getCachedDataTempRegisterIDAndInvalidate());
2235 m_assembler.cmp<32>(dataTempRegister, right);
2236 m_assembler.cset<32>(dest, ARM64Condition(cond));
2237 }
2238
2239 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2240 {
2241 move(right, getCachedDataTempRegisterIDAndInvalidate());
2242 m_assembler.cmp<32>(left, dataTempRegister);
2243 m_assembler.cset<32>(dest, ARM64Condition(cond));
2244 }
2245
2246 void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2247 {
2248 m_assembler.cmp<64>(left, right);
2249 m_assembler.cset<32>(dest, ARM64Condition(cond));
2250 }
2251
2252 void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2253 {
2254 signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
2255 m_assembler.cmp<64>(left, dataTempRegister);
2256 m_assembler.cset<32>(dest, ARM64Condition(cond));
2257 }
2258
2259 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
2260 {
2261 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2262 move(right, getCachedDataTempRegisterIDAndInvalidate());
2263 compare32(cond, memoryTempRegister, dataTempRegister, dest);
2264 }
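// Each of the compares above materializes the condition into dest as 0 or 1
// via cset; e.g. (illustrative registers) compare32(Equal, w1, w2, w0) emits
// "cmp w1, w2" followed by "cset w0, eq".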
2265
2266 void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2267 {
2268 if (mask.m_value == -1)
2269 m_assembler.tst<32>(src, src);
2270 else {
2271 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2272 m_assembler.tst<32>(src, dataTempRegister);
2273 }
2274 m_assembler.cset<32>(dest, ARM64Condition(cond));
2275 }
2276
2277 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2278 {
2279 load32(address, getCachedDataTempRegisterIDAndInvalidate());
2280 test32(cond, dataTempRegister, mask, dest);
2281 }
2282
2283 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2284 {
2285 load8(address, getCachedDataTempRegisterIDAndInvalidate());
2286 test32(cond, dataTempRegister, mask, dest);
2287 }
2288
2289 void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2290 {
2291 m_assembler.tst<64>(op1, op2);
2292 m_assembler.cset<32>(dest, ARM64Condition(cond));
2293 }
2294
2295 void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2296 {
2297 if (mask.m_value == -1)
2298 m_assembler.tst<64>(src, src);
2299 else {
2300 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2301 m_assembler.tst<64>(src, dataTempRegister);
2302 }
2303 m_assembler.cset<32>(dest, ARM64Condition(cond));
2304 }
2305
2306
2307 // Patchable operations
2308
2309 ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
2310 {
2311 DataLabel32 label(this);
2312 moveWithFixedWidth(imm, dest);
2313 return label;
2314 }
2315
2316 ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
2317 {
2318 DataLabelPtr label(this);
2319 moveWithFixedWidth(imm, dest);
2320 return label;
2321 }
2322
2323 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2324 {
2325 dataLabel = DataLabelPtr(this);
2326 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2327 return branch64(cond, left, dataTempRegister);
2328 }
2329
2330 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2331 {
2332 dataLabel = DataLabelPtr(this);
2333 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2334 return branch64(cond, left, dataTempRegister);
2335 }
2336
2337 ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
2338 {
2339 dataLabel = DataLabel32(this);
2340 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2341 return branch32(cond, left, dataTempRegister);
2342 }
2343
2344 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
2345 {
2346 m_makeJumpPatchable = true;
2347 Jump result = branch32(cond, left, TrustedImm32(right));
2348 m_makeJumpPatchable = false;
2349 return PatchableJump(result);
2350 }
2351
2352 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2353 {
2354 m_makeJumpPatchable = true;
2355 Jump result = branchTest32(cond, reg, mask);
2356 m_makeJumpPatchable = false;
2357 return PatchableJump(result);
2358 }
2359
2360 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
2361 {
2362 m_makeJumpPatchable = true;
2363 Jump result = branch32(cond, reg, imm);
2364 m_makeJumpPatchable = false;
2365 return PatchableJump(result);
2366 }
2367
2368 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2369 {
2370 m_makeJumpPatchable = true;
2371 Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
2372 m_makeJumpPatchable = false;
2373 return PatchableJump(result);
2374 }
2375
2376 PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
2377 {
2378 m_makeJumpPatchable = true;
2379 Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
2380 m_makeJumpPatchable = false;
2381 return PatchableJump(result);
2382 }
2383
2384 PatchableJump patchableJump()
2385 {
2386 m_makeJumpPatchable = true;
2387 Jump result = jump();
2388 m_makeJumpPatchable = false;
2389 return PatchableJump(result);
2390 }
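// While m_makeJumpPatchable is set, makeBranch() and jump() select the
// FixedSize jump kinds, so branch compaction leaves these jumps at full width
// and they remain safe to repatch after linking.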
2391
2392 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
2393 {
2394 DataLabelPtr label(this);
2395 moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
2396 store64(dataTempRegister, address);
2397 return label;
2398 }
2399
2400 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
2401 {
2402 return storePtrWithPatch(TrustedImmPtr(0), address);
2403 }
2404
2405 static void reemitInitialMoveWithPatch(void* address, void* value)
2406 {
2407 ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
2408 }
2409
2410 // Miscellaneous operations:
2411
2412 void breakpoint(uint16_t imm = 0)
2413 {
2414 m_assembler.brk(imm);
2415 }
2416
2417 void nop()
2418 {
2419 m_assembler.nop();
2420 }
2421
2422 void memoryFence()
2423 {
2424 m_assembler.dmbSY();
2425 }
2426
2427
2428 // Misc helper functions.
2429
2430 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
2431 static RelationalCondition invert(RelationalCondition cond)
2432 {
2433 return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
2434 }
2435
2436 static FunctionPtr readCallTarget(CodeLocationCall call)
2437 {
2438 return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
2439 }
2440
2441 static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
2442 {
2443 ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
2444 }
2445
2446 static ptrdiff_t maxJumpReplacementSize()
2447 {
2448 return ARM64Assembler::maxJumpReplacementSize();
2449 }
2450
2451 RegisterID scratchRegisterForBlinding()
2452 {
2453 // We *do not* have a scratch register for blinding.
2454 RELEASE_ASSERT_NOT_REACHED();
2455 return getCachedDataTempRegisterIDAndInvalidate();
2456 }
2457
2458 static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
2459 static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
2460
2461 static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
2462 {
2463 return label.labelAtOffset(0);
2464 }
2465
2466 static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
2467 {
2468 UNREACHABLE_FOR_PLATFORM();
2469 return CodeLocationLabel();
2470 }
2471
2472 static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
2473 {
2474 UNREACHABLE_FOR_PLATFORM();
2475 return CodeLocationLabel();
2476 }
2477
2478 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
2479 {
2480 reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
2481 }
2482
2483 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
2484 {
2485 UNREACHABLE_FOR_PLATFORM();
2486 }
2487
2488 static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
2489 {
2490 UNREACHABLE_FOR_PLATFORM();
2491 }
2492
2493 protected:
2494 ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
2495 {
2496 m_assembler.b_cond(cond);
2497 AssemblerLabel label = m_assembler.label();
2498 m_assembler.nop();
2499 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
2500 }
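// The returned label points at the trailing nop; the nop reserves space so
// that, if the target ends up out of conditional-branch range, the pair can
// be relinked as an inverted condition branching over an unconditional jump.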
2501 ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
2502 ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
2503 ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
2504
2505 template <int dataSize>
2506 ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
2507 {
2508 if (cond == IsZero)
2509 m_assembler.cbz<dataSize>(reg);
2510 else
2511 m_assembler.cbnz<dataSize>(reg);
2512 AssemblerLabel label = m_assembler.label();
2513 m_assembler.nop();
2514 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
2515 }
2516
2517 ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
2518 {
2519 ASSERT(bit < 64);
2520 bit &= 0x3f;
2521 if (cond == IsZero)
2522 m_assembler.tbz(reg, bit);
2523 else
2524 m_assembler.tbnz(reg, bit);
2525 AssemblerLabel label = m_assembler.label();
2526 m_assembler.nop();
2527 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
2528 }
2529
2530 ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
2531 {
2532 return static_cast<ARM64Assembler::Condition>(cond);
2533 }
2534
2535 ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
2536 {
2537 return static_cast<ARM64Assembler::Condition>(cond);
2538 }
2539
2540 ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
2541 {
2542 return static_cast<ARM64Assembler::Condition>(cond);
2543 }
2544
2545 private:
2546 ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
2547 ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
2548
2549 ALWAYS_INLINE bool isInIntRange(intptr_t value)
2550 {
2551 return value == ((value << 32) >> 32);
2552 }
2553
2554 template<typename ImmediateType, typename rawType>
2555 void moveInternal(ImmediateType imm, RegisterID dest)
2556 {
2557 const int dataSize = sizeof(rawType) * 8;
2558 const int numberHalfWords = dataSize / 16;
2559 rawType value = bitwise_cast<rawType>(imm.m_value);
2560 uint16_t halfword[numberHalfWords];
2561
2562 // Handle 0 and ~0 here to simplify code below
2563 if (!value) {
2564 m_assembler.movz<dataSize>(dest, 0);
2565 return;
2566 }
2567 if (!~value) {
2568 m_assembler.movn<dataSize>(dest, 0);
2569 return;
2570 }
2571
2572 LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
2573
2574 if (logicalImm.isValid()) {
2575 m_assembler.movi<dataSize>(dest, logicalImm);
2576 return;
2577 }
2578
2579 // Figure out how many halfwords are 0 or 0xffff, then choose movz or movn accordingly.
2580 int zeroOrNegateVote = 0;
2581 for (int i = 0; i < numberHalfWords; ++i) {
2582 halfword[i] = getHalfword(value, i);
2583 if (!halfword[i])
2584 zeroOrNegateVote++;
2585 else if (halfword[i] == 0xffff)
2586 zeroOrNegateVote--;
2587 }
2588
2589 bool needToClearRegister = true;
2590 if (zeroOrNegateVote >= 0) {
2591 for (int i = 0; i < numberHalfWords; i++) {
2592 if (halfword[i]) {
2593 if (needToClearRegister) {
2594 m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
2595 needToClearRegister = false;
2596 } else
2597 m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
2598 }
2599 }
2600 } else {
2601 for (int i = 0; i < numberHalfWords; i++) {
2602 if (halfword[i] != 0xffff) {
2603 if (needToClearRegister) {
2604 m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
2605 needToClearRegister = false;
2606 } else
2607 m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
2608 }
2609 }
2610 }
2611 }
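// Worked example (illustrative): for the 64-bit value 0x0000ffff00001234 the
// halfwords are { 0x1234, 0x0000, 0xffff, 0x0000 }, the vote is +1, and the
// emitted sequence is "movz dest, #0x1234" then "movk dest, #0xffff, lsl #32";
// for 0xffffffffffff1234 the vote is -3 and a single "movn dest, #0xedcb"
// suffices.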
2612
2613 template<int datasize>
2614 ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
2615 {
2616 m_assembler.ldr<datasize>(rt, rn, pimm);
2617 }
2618
2619 template<int datasize>
2620 ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
2621 {
2622 m_assembler.ldur<datasize>(rt, rn, simm);
2623 }
2624
2625 template<int datasize>
2626 ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
2627 {
2628 m_assembler.str<datasize>(rt, rn, pimm);
2629 }
2630
2631 template<int datasize>
2632 ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
2633 {
2634 m_assembler.stur<datasize>(rt, rn, simm);
2635 }
2636
2637 void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
2638 {
2639 int32_t value = imm.m_value;
2640 m_assembler.movz<32>(dest, getHalfword(value, 0));
2641 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2642 }
2643
2644 void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
2645 {
2646 intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
2647 m_assembler.movz<64>(dest, getHalfword(value, 0));
2648 m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
2649 m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
2650 }
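// Only three halfwords are emitted here: the sequence assumes the top 16 bits
// of the pointer are zero, and it always has the same width so the assembler
// can later repatch it via setPointer().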
2651
2652 void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
2653 {
2654 if (value >= 0) {
2655 m_assembler.movz<32>(dest, getHalfword(value, 0));
2656 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2657 } else {
2658 m_assembler.movn<32>(dest, ~getHalfword(value, 0));
2659 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2660 }
2661 }
2662
2663 void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
2664 {
2665 move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
2666 }
2667
2668 template<int datasize>
2669 ALWAYS_INLINE void load(const void* address, RegisterID dest)
2670 {
2671 intptr_t currentRegisterContents;
2672 if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2673 intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2674 intptr_t addressDelta = addressAsInt - currentRegisterContents;
2675
2676 if (dest == memoryTempRegister)
2677 m_cachedMemoryTempRegister.invalidate();
2678
2679 if (isInIntRange(addressDelta)) {
2680 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2681 m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
2682 return;
2683 }
2684
2685 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2686 m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
2687 return;
2688 }
2689 }
2690
2691 if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2692 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2693 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2694 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2695 return;
2696 }
2697 }
2698
2699 move(TrustedImmPtr(address), memoryTempRegister);
2700 if (dest == memoryTempRegister)
2701 m_cachedMemoryTempRegister.invalidate();
2702 else
2703 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2704 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2705 }
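// Example (illustrative addresses): two consecutive absolute loads from
// 0x1000 and 0x1008 materialize the pointer only once; the second load is
// emitted as "ldur dest, [memoryTempRegister, #8]" because the cached value
// is still 0x1000 and the delta fits the signed 9-bit offset form.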
2706
2707 template<int datasize>
2708 ALWAYS_INLINE void store(RegisterID src, const void* address)
2709 {
2710 intptr_t currentRegisterContents;
2711 if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2712 intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2713 intptr_t addressDelta = addressAsInt - currentRegisterContents;
2714
2715 if (isInIntRange(addressDelta)) {
2716 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2717 m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
2718 return;
2719 }
2720
2721 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2722 m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
2723 return;
2724 }
2725 }
2726
2727 if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2728 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2729 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2730 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
2731 return;
2732 }
2733 }
2734
2735 move(TrustedImmPtr(address), memoryTempRegister);
2736 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2737 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
2738 }
2739
2740 template <int dataSize>
2741 ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
2742 {
2743 intptr_t currentRegisterContents;
2744 if (dest.value(currentRegisterContents)) {
2745 if (currentRegisterContents == immediate)
2746 return true;
2747
2748 LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
2749
2750 if (logicalImm.isValid()) {
2751 m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
2752 dest.setValue(immediate);
2753 return true;
2754 }
2755
2756 if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
2757 if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
2758 m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
2759
2760 if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
2761 m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
2762
2763 dest.setValue(immediate);
2764 return true;
2765 }
2766 }
2767
2768 return false;
2769 }
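// Example (illustrative): if the cached register is known to hold 0x10000 and
// the requested immediate is 0x12345, only the low halfword differs, so a
// single "movk dest, #0x2345" updates it in place of a full movz/movk
// sequence.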
2770
2771 void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
2772 {
2773 if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
2774 return;
2775
2776 moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
2777 dest.setValue(imm.m_value);
2778 }
2779
2780 void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
2781 {
2782 if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
2783 return;
2784
2785 moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
2786 dest.setValue(imm.asIntptr());
2787 }
2788
2789 void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
2790 {
2791 if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
2792 return;
2793
2794 moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
2795 dest.setValue(imm.m_value);
2796 }
2797
2798 template<int datasize>
2799 ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2800 {
2801 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2802 loadUnscaledImmediate<datasize>(rt, rn, offset);
2803 return true;
2804 }
2805 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2806 loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2807 return true;
2808 }
2809 return false;
2810 }
2811
2812 template<int datasize>
2813 ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2814 {
2815 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2816 m_assembler.ldur<datasize>(rt, rn, offset);
2817 return true;
2818 }
2819 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2820 m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
2821 return true;
2822 }
2823 return false;
2824 }
2825
2826 template<int datasize>
2827 ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2828 {
2829 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2830 storeUnscaledImmediate<datasize>(rt, rn, offset);
2831 return true;
2832 }
2833 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2834 storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2835 return true;
2836 }
2837 return false;
2838 }
2839
2840 template<int datasize>
2841 ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2842 {
2843 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2844 m_assembler.stur<datasize>(rt, rn, offset);
2845 return true;
2846 }
2847 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2848 m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
2849 return true;
2850 }
2851 return false;
2852 }
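// The try{Load,Store}WithOffset helpers prefer the unscaled signed 9-bit form
// (ldur/stur) and fall back to the scaled unsigned 12-bit form (ldr/str); a
// caller that receives false must compute the address into a temp register
// itself.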
2853
2854 friend class LinkBuffer;
2855 friend class RepatchBuffer;
2856
2857 static void linkCall(void* code, Call call, FunctionPtr function)
2858 {
2859 if (call.isFlagSet(Call::Near))
2860 ARM64Assembler::linkCall(code, call.m_label, function.value());
2861 else
2862 ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
2863 }
2864
2865 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
2866 {
2867 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2868 }
2869
2870 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
2871 {
2872 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2873 }
2874
2875 CachedTempRegister m_dataMemoryTempRegister;
2876 CachedTempRegister m_cachedMemoryTempRegister;
2877 bool m_makeJumpPatchable;
2878 };
2879
2880 // Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
2881 template<>
2882 ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
2883 {
2884 m_assembler.ldrb(rt, rn, pimm);
2885 }
2886
2887 template<>
2888 ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
2889 {
2890 m_assembler.ldrh(rt, rn, pimm);
2891 }
2892
2893 template<>
2894 ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
2895 {
2896 m_assembler.ldurb(rt, rn, simm);
2897 }
2898
2899 template<>
2900 ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
2901 {
2902 m_assembler.ldurh(rt, rn, simm);
2903 }
2904
2905 template<>
2906 ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
2907 {
2908 m_assembler.strb(rt, rn, pimm);
2909 }
2910
2911 template<>
2912 ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
2913 {
2914 m_assembler.strh(rt, rn, pimm);
2915 }
2916
2917 template<>
2918 ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
2919 {
2920 m_assembler.sturb(rt, rn, simm);
2921 }
2922
2923 template<>
2924 ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
2925 {
2926 m_assembler.sturh(rt, rn, simm);
2927 }
2928
2929 } // namespace JSC
2930
2931 #endif // ENABLE(ASSEMBLER)
2932
2933 #endif // MacroAssemblerARM64_h