/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM64_h
#define MacroAssemblerARM64_h

#if ENABLE(ASSEMBLER)

#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/MathExtras.h>

namespace JSC {

class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    static const intptr_t maskHalfWord0 = 0xffffl;
    static const intptr_t maskHalfWord1 = 0xffff0000l;
    static const intptr_t maskUpperWord = 0xffffffff00000000l;

    // 4 instructions - 3 to load the function pointer, + blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
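
    // Illustrative sketch (editorial; the exact immediates are placeholders):
    // a repatchable call is emitted as three MOVZ/MOVK instructions that
    // materialize a 48-bit function pointer in a temp register, followed by
    // BLR. At 4 bytes per instruction that is 16 bytes, which is what the
    // -16 above rewinds over:
    //
    //     movz x16, #0x1234            ; bits [15:0] of the target
    //     movk x16, #0x5678, lsl #16   ; bits [31:16]
    //     movk x16, #0x9abc, lsl #32   ; bits [47:32]
    //     blr  x16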

public:
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }

    typedef ARM64Registers::FPRegisterID FPRegisterID;
    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
    int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }

    static const Scale ScalePtr = TimesEight;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        // This is the largest 32-bit access allowed, aligned to a 64-bit boundary.
        return !(value & ~0x3ff8);
    }
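
    // Editorial note: the mask above accepts exactly the offsets
    // 0, 8, 16, ..., 0x3ff8 - that is, non-negative, 8-byte-aligned offsets
    // small enough for the compact load encodings used by
    // load64WithCompactAddressOffsetPatch() below. For example:
    //
    //     isCompactPtrAlignedAddressOffset(0x3ff8); // true - largest accepted offset
    //     isCompactPtrAlignedAddressOffset(0x4000); // false - too large
    //     isCompactPtrAlignedAddressOffset(4);      // false - not 8-byte aligned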

    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! Check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! Check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
    };
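
    // Background note (editorial, summarizing the ARM64 NZCV encoding rather
    // than anything stated in this file): after FCMP, an unordered result sets
    // the V flag, and no single condition code means "ordered and not equal".
    // DoubleNotEqual (VC) and DoubleEqualOrUnordered (VS) above are therefore
    // stand-in values; branchDouble() below detects them and emits an extra
    // ConditionVS branch to filter out the unordered case first.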

    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID linkRegister = ARM64Registers::lr;


    // Integer operations:

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.add<64>(dest, dest, src);
    }

    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }

    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            // Use 'src' (not 'dest') as the operand so the three-operand form is honored.
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<32>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

            if (logicalImm.isValid()) {
                // Use 'src' (not 'dest') as the operand so the three-operand form is honored.
                m_assembler.eor<32>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<32>(dest, src, dataTempRegister);
        }
    }

    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }

    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

            if (logicalImm.isValid()) {
                // Use 'src' (not 'dest') as the operand so the three-operand form is honored.
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }


    // Memory access operations:

    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
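
    // Editorial note on the recurring pattern in this section (the helpers are
    // defined elsewhere in this class): each load/store first attempts an
    // immediate-offset encoding via tryLoadWithOffset/tryStoreWithOffset; if
    // the offset cannot be encoded, it is materialized into memoryTempRegister
    // and the register-offset addressing form is used, conceptually:
    //
    //     if (tryLoadWithOffset<64>(dest, base, offset))
    //         return;                                    // offset fit an immediate form
    //     signExtend32ToPtr(TrustedImm32(offset), tmp);  // tmp = memoryTempRegister
    //     m_assembler.ldr<64>(dest, base, tmp);          // ldr dest, [base, tmp]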

    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, void* address)
    {
        move(ImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }


    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    // Convert 'src' to an integer, and place the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump result = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }

    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32 bits to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check that the low 32 bits sign-extend to be equal to the full value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dest.
        m_assembler.fcvtzs<64, 64>(dest, src);
        // Check that the low 32 bits zero-extend to be equal to the full value.
        m_assembler.cmp<64>(dest, dest, ARM64Assembler::UXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeDouble(FPRegisterID src, const void* address)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // Result is undefined if the value is outside of the integer range.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }

    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }


    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine-word-sized units of data. Push and pop
    // operations add and remove a single register-sized unit of data
    // to or from the stack. These operations are not supported on
    // ARM64. Peek and poke operations read or write values on the
    // stack, without moving the current stack position. Additionally,
    // there are popToRestore and pushToSave operations, which are
    // designed just for quick-and-dirty saving and restoring of
    // temporary values. These operations don't claim to have any
    // ABI compatibility.

    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }

    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }

    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }

    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
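
    // Usage sketch (editorial; 'masm' and 'someReg' are placeholders):
    // pushToSave/popToRestore must be used in matched pairs, and each push
    // consumes a full 16 bytes even for an 8-byte value, keeping sp 16-byte
    // aligned as ARM64 requires:
    //
    //     masm.pushToSave(someReg);   // str someReg, [sp, #-16]!
    //     ...                         // someReg may be clobbered here
    //     masm.popToRestore(someReg); // ldr someReg, [sp], #16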


    // Register move operations:

    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }

    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
    // A usage sketch follows below.

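    // Usage sketch (editorial; 'masm' and the operand choices are placeholders):
    //
    //     MacroAssemblerARM64 masm;
    //     Jump isSmall = masm.branch32(LessThanOrEqual, ARM64Registers::x0, TrustedImm32(5));
    //     ... // emit the "greater than 5" path here
    //     isSmall.link(&masm); // forward jump resolved once the target is known
    //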
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<32>(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, left, memoryTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<64>(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        intptr_t immediate = right.m_value;
        if (isUInt12(immediate))
            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
        else if (isUInt12(-immediate))
            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, left, memoryTempRegister);
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load64(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<32>(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<32>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<32>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
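
    // Editorial note on the dispatch above (based on the helper names, which
    // are defined elsewhere in this class): a whole-register test (mask == -1)
    // with a Zero/NonZero condition becomes a single compare-and-branch
    // (CBZ/CBNZ) via makeCompareAndBranch, a single-bit mask becomes a
    // test-bit-and-branch (TBZ/TBNZ) via makeTestBitAndBranch, and only the
    // general case needs TST followed by a conditional branch.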
1616
1617 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1618 {
1619 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
1620 return branchTest32(cond, memoryTempRegister, mask);
1621 }
1622
1623 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1624 {
1625 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
1626 return branchTest32(cond, memoryTempRegister, mask);
1627 }
1628
1629 Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
1630 {
1631 m_assembler.tst<64>(reg, mask);
1632 return Jump(makeBranch(cond));
1633 }
1634
1635 Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1636 {
1637 if (mask.m_value == -1) {
1638 if ((cond == Zero) || (cond == NonZero))
1639 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
1640 m_assembler.tst<64>(reg, reg);
1641 } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
1642 return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
1643 else {
1644 if ((cond == Zero) || (cond == NonZero)) {
1645 LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
1646
1647 if (logicalImm.isValid()) {
1648 m_assembler.tst<64>(reg, logicalImm);
1649 return Jump(makeBranch(cond));
1650 }
1651 }
1652
1653 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
1654 m_assembler.tst<64>(reg, dataTempRegister);
1655 }
1656 return Jump(makeBranch(cond));
1657 }
1658
1659 Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
1660 {
1661 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1662 return branchTest64(cond, dataTempRegister, mask);
1663 }
1664
1665 Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1666 {
1667 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1668 return branchTest64(cond, dataTempRegister, mask);
1669 }
1670
1671 Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1672 {
1673 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1674 return branchTest64(cond, dataTempRegister, mask);
1675 }
1676
1677 Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1678 {
1679 load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1680 return branchTest64(cond, dataTempRegister, mask);
1681 }
1682
1683 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1684 {
1685 load8(address, getCachedDataTempRegisterIDAndInvalidate());
1686 return branchTest32(cond, dataTempRegister, mask);
1687 }
1688
1689 Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1690 {
1691 load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1692 return branchTest32(cond, dataTempRegister, mask);
1693 }
1694
1695 Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
1696 {
1697 move(ImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
1698 m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
1699 return branchTest32(cond, dataTempRegister, mask);
1700 }
1701
1702 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1703 {
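// Editorial note: ARMv8 permits unaligned halfword accesses to normal
// memory, so the generic branch32 path below is sufficient and no special
// handling is needed on ARM64.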
1704 return branch32(cond, left, right);
1705 }
1706
1707
1708 // Arithmetic control flow operations:
1709 //
1710 // This set of conditional branch operations branches based
1711 // on the result of an arithmetic operation. The operation
1712 // is performed as normal, storing the result.
1713 //
1714 // * jz operations branch if the result is zero.
1715 // * jo operations branch if the (signed) arithmetic
1716 // operation caused an overflow to occur.
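//
// Illustrative use (editorial sketch, not part of the original file),
// assuming a MacroAssemblerARM64 instance named "jit":
//
//     Jump overflowed = jit.branchAdd32(Overflow, op1, op2, dest);
//     // ... fast path continues with the result in dest ...
//     overflowed.link(&jit); // slow path: handle the overflow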
1717
1718 Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1719 {
1720 m_assembler.add<32, S>(dest, op1, op2);
1721 return Jump(makeBranch(cond));
1722 }
1723
1724 Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1725 {
1726 if (isUInt12(imm.m_value)) {
1727 m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
1728 return Jump(makeBranch(cond));
1729 }
1730 if (isUInt12(-imm.m_value)) {
1731 m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
1732 return Jump(makeBranch(cond));
1733 }
1734
1735 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
1736 return branchAdd32(cond, op1, dataTempRegister, dest);
1737 }
1738
1739 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1740 {
1741 return branchAdd32(cond, dest, src, dest);
1742 }
1743
1744 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1745 {
1746 return branchAdd32(cond, dest, imm, dest);
1747 }
1748
1749 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
1750 {
1751 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1752
1753 if (isUInt12(imm.m_value)) {
1754 m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
1755 store32(dataTempRegister, address.m_ptr);
1756 } else if (isUInt12(-imm.m_value)) {
1757 m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
1758 store32(dataTempRegister, address.m_ptr);
1759 } else {
1760 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
1761 m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
1762 store32(dataTempRegister, address.m_ptr);
1763 }
1764
1765 return Jump(makeBranch(cond));
1766 }
1767
1768 Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1769 {
1770 m_assembler.add<64, S>(dest, op1, op2);
1771 return Jump(makeBranch(cond));
1772 }
1773
1774 Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1775 {
1776 if (isUInt12(imm.m_value)) {
1777 m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
1778 return Jump(makeBranch(cond));
1779 }
1780 if (isUInt12(-imm.m_value)) {
1781 m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
1782 return Jump(makeBranch(cond));
1783 }
1784
1785 move(imm, getCachedDataTempRegisterIDAndInvalidate());
1786 return branchAdd64(cond, op1, dataTempRegister, dest);
1787 }
1788
1789 Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
1790 {
1791 return branchAdd64(cond, dest, src, dest);
1792 }
1793
1794 Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1795 {
1796 return branchAdd64(cond, dest, imm, dest);
1797 }
1798
1799 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1800 {
1801 ASSERT(cond != Signed);
1802
1803 if (cond != Overflow) {
1804 m_assembler.mul<32>(dest, src1, src2);
1805 return branchTest32(cond, dest);
1806 }
1807
1808 // This is a signed multiply of two 32-bit values, producing a 64-bit result.
1809 m_assembler.smull(dest, src1, src2);
1810 // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
1811 m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
1812 // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
1813 m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
1814 // After a mul32 the top 32 bits of the register should be clear.
1815 zeroExtend32ToPtr(dest, dest);
1816 // Check that bits 31..63 of the original result were all equal.
1817 return branch32(NotEqual, memoryTempRegister, dataTempRegister);
1818 }
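// Worked example of the overflow check above (editorial note): for
// src1 = 0x40000000 and src2 = 4, smull produces 0x100000000. Bits 63..32
// are 1, while bit 31 of the low word is 0, so dataTempRegister (1) differs
// from memoryTempRegister (0) and the Overflow branch is taken. When the
// product fits in 32 bits, the high word equals the sign-extension of
// bit 31 and the branch falls through.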
1819
1820 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
1821 {
1822 return branchMul32(cond, dest, src, dest);
1823 }
1824
1825 Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
1826 {
1827 move(imm, getCachedDataTempRegisterIDAndInvalidate());
1828 return branchMul32(cond, dataTempRegister, src, dest);
1829 }
1830
1831 Jump branchNeg32(ResultCondition cond, RegisterID dest)
1832 {
1833 m_assembler.neg<32, S>(dest, dest);
1834 return Jump(makeBranch(cond));
1835 }
1836
1837 Jump branchSub32(ResultCondition cond, RegisterID dest)
1838 {
1839 m_assembler.neg<32, S>(dest, dest);
1840 return Jump(makeBranch(cond));
1841 }
1842
1843 Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1844 {
1845 m_assembler.sub<32, S>(dest, op1, op2);
1846 return Jump(makeBranch(cond));
1847 }
1848
1849 Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1850 {
1851 if (isUInt12(imm.m_value)) {
1852 m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
1853 return Jump(makeBranch(cond));
1854 }
1855 if (isUInt12(-imm.m_value)) {
1856 m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
1857 return Jump(makeBranch(cond));
1858 }
1859
1860 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
1861 return branchSub32(cond, op1, dataTempRegister, dest);
1862 }
1863
1864 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
1865 {
1866 return branchSub32(cond, dest, src, dest);
1867 }
1868
1869 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1870 {
1871 return branchSub32(cond, dest, imm, dest);
1872 }
1873
1874 Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1875 {
1876 m_assembler.sub<64, S>(dest, op1, op2);
1877 return Jump(makeBranch(cond));
1878 }
1879
1880 Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1881 {
1882 if (isUInt12(imm.m_value)) {
1883 m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
1884 return Jump(makeBranch(cond));
1885 }
1886 if (isUInt12(-imm.m_value)) {
1887 m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
1888 return Jump(makeBranch(cond));
1889 }
1890
1891 move(imm, getCachedDataTempRegisterIDAndInvalidate());
1892 return branchSub64(cond, op1, dataTempRegister, dest);
1893 }
1894
1895 Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
1896 {
1897 return branchSub64(cond, dest, src, dest);
1898 }
1899
1900 Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1901 {
1902 return branchSub64(cond, dest, imm, dest);
1903 }
1904
1905
1906 // Jumps, calls, returns
1907
1908 ALWAYS_INLINE Call call()
1909 {
1910 AssemblerLabel pointerLabel = m_assembler.label();
1911 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
1912 invalidateAllTempRegisters();
1913 m_assembler.blr(dataTempRegister);
1914 AssemblerLabel callLabel = m_assembler.label();
1915 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
1916 return Call(callLabel, Call::Linkable);
1917 }
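// Layout of the fixed-width sequence emitted above (editorial sketch;
// dataTempRegister is ip0, i.e. x16):
//
//     movz x16, #ptr[15:0]            <- pointerLabel
//     movk x16, #ptr[31:16], lsl #16
//     movk x16, #ptr[47:32], lsl #32
//     blr  x16
//                                     <- callLabel
//
// callLabel - pointerLabel spans four instructions (16 bytes), matching
// REPATCH_OFFSET_CALL_TO_POINTER, so linkCall() and repatchCall() can walk
// back from the call label to the pointer load and rewrite it.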
1918
1919 ALWAYS_INLINE Call call(RegisterID target)
1920 {
1921 invalidateAllTempRegisters();
1922 m_assembler.blr(target);
1923 return Call(m_assembler.label(), Call::None);
1924 }
1925
1926 ALWAYS_INLINE Call call(Address address)
1927 {
1928 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1929 return call(dataTempRegister);
1930 }
1931
1932 ALWAYS_INLINE Jump jump()
1933 {
1934 AssemblerLabel label = m_assembler.label();
1935 m_assembler.b();
1936 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
1937 }
1938
1939 void jump(RegisterID target)
1940 {
1941 m_assembler.br(target);
1942 }
1943
1944 void jump(Address address)
1945 {
1946 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1947 m_assembler.br(dataTempRegister);
1948 }
1949
1950 void jump(AbsoluteAddress address)
1951 {
1952 move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
1953 load64(Address(dataTempRegister), dataTempRegister);
1954 m_assembler.br(dataTempRegister);
1955 }
1956
1957 ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
1958 {
1959 oldJump.link(this);
1960 return tailRecursiveCall();
1961 }
1962
1963 ALWAYS_INLINE Call nearCall()
1964 {
1965 m_assembler.bl();
1966 return Call(m_assembler.label(), Call::LinkableNear);
1967 }
1968
1969 ALWAYS_INLINE void ret()
1970 {
1971 m_assembler.ret();
1972 }
1973
1974 ALWAYS_INLINE Call tailRecursiveCall()
1975 {
1976 // Like a normal call, but don't link: branch with br rather than blr,
1976 // so the link register is untouched and the callee returns to our caller.
1977 AssemblerLabel pointerLabel = m_assembler.label();
1978 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
1979 m_assembler.br(dataTempRegister);
1980 AssemblerLabel callLabel = m_assembler.label();
1981 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
1982 return Call(callLabel, Call::Linkable);
1983 }
1984
1985
1986 // Comparison operations
1987
1988 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1989 {
1990 m_assembler.cmp<32>(left, right);
1991 m_assembler.cset<32>(dest, ARM64Condition(cond));
1992 }
1993
1994 void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
1995 {
1996 load32(left, getCachedDataTempRegisterIDAndInvalidate());
1997 m_assembler.cmp<32>(dataTempRegister, right);
1998 m_assembler.cset<32>(dest, ARM64Condition(cond));
1999 }
2000
2001 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2002 {
2003 move(right, getCachedDataTempRegisterIDAndInvalidate());
2004 m_assembler.cmp<32>(left, dataTempRegister);
2005 m_assembler.cset<32>(dest, ARM64Condition(cond));
2006 }
2007
2008 void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2009 {
2010 m_assembler.cmp<64>(left, right);
2011 m_assembler.cset<32>(dest, ARM64Condition(cond));
2012 }
2013
2014 void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2015 {
2016 signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
2017 m_assembler.cmp<64>(left, dataTempRegister);
2018 m_assembler.cset<32>(dest, ARM64Condition(cond));
2019 }
2020
2021 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
2022 {
2023 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2024 move(right, getCachedDataTempRegisterIDAndInvalidate());
2025 compare32(cond, memoryTempRegister, dataTempRegister, dest);
2026 }
2027
2028 void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2029 {
2030 if (mask.m_value == -1)
2031 m_assembler.tst<32>(src, src);
2032 else {
2033 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2034 m_assembler.tst<32>(src, dataTempRegister);
2035 }
2036 m_assembler.cset<32>(dest, ARM64Condition(cond));
2037 }
2038
2039 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2040 {
2041 load32(address, getCachedDataTempRegisterIDAndInvalidate());
2042 test32(cond, dataTempRegister, mask, dest);
2043 }
2044
2045 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2046 {
2047 load8(address, getCachedDataTempRegisterIDAndInvalidate());
2048 test32(cond, dataTempRegister, mask, dest);
2049 }
2050
2051 void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2052 {
2053 m_assembler.tst<64>(op1, op2);
2054 m_assembler.cset<32>(dest, ARM64Condition(cond));
2055 }
2056
2057 void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2058 {
2059 if (mask.m_value == -1)
2060 m_assembler.tst<64>(src, src);
2061 else {
2062 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2063 m_assembler.tst<64>(src, dataTempRegister);
2064 }
2065 m_assembler.cset<32>(dest, ARM64Condition(cond));
2066 }
2067
2068
2069 // Patchable operations
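//
// Illustrative use (editorial sketch, not part of the original file): a
// pointer compare whose right-hand side starts out null and is rewritten
// once the real value is known:
//
//     DataLabelPtr ptrLabel;
//     Jump miss = jit.branchPtrWithPatch(NotEqual, baseReg, ptrLabel);
//     // ... after linking, the constant loaded at ptrLabel can be
//     // rewritten through the repatching machinery.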
2070
2071 ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
2072 {
2073 DataLabel32 label(this);
2074 moveWithFixedWidth(imm, dest);
2075 return label;
2076 }
2077
2078 ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
2079 {
2080 DataLabelPtr label(this);
2081 moveWithFixedWidth(imm, dest);
2082 return label;
2083 }
2084
2085 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2086 {
2087 dataLabel = DataLabelPtr(this);
2088 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2089 return branch64(cond, left, dataTempRegister);
2090 }
2091
2092 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2093 {
2094 dataLabel = DataLabelPtr(this);
2095 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2096 return branch64(cond, left, dataTempRegister);
2097 }
2098
2099 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
2100 {
2101 m_makeJumpPatchable = true;
2102 Jump result = branch32(cond, left, TrustedImm32(right));
2103 m_makeJumpPatchable = false;
2104 return PatchableJump(result);
2105 }
2106
2107 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2108 {
2109 m_makeJumpPatchable = true;
2110 Jump result = branchTest32(cond, reg, mask);
2111 m_makeJumpPatchable = false;
2112 return PatchableJump(result);
2113 }
2114
2115 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
2116 {
2117 m_makeJumpPatchable = true;
2118 Jump result = branch32(cond, reg, imm);
2119 m_makeJumpPatchable = false;
2120 return PatchableJump(result);
2121 }
2122
2123 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2124 {
2125 m_makeJumpPatchable = true;
2126 Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
2127 m_makeJumpPatchable = false;
2128 return PatchableJump(result);
2129 }
2130
2131 PatchableJump patchableJump()
2132 {
2133 m_makeJumpPatchable = true;
2134 Jump result = jump();
2135 m_makeJumpPatchable = false;
2136 return PatchableJump(result);
2137 }
2138
2139 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
2140 {
2141 DataLabelPtr label(this);
2142 moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
2143 store64(dataTempRegister, address);
2144 return label;
2145 }
2146
2147 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
2148 {
2149 return storePtrWithPatch(TrustedImmPtr(0), address);
2150 }
2151
2152 static void reemitInitialMoveWithPatch(void* address, void* value)
2153 {
2154 ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
2155 }
2156
2157 // Miscellaneous operations:
2158
2159 void breakpoint(uint16_t imm = 0)
2160 {
2161 m_assembler.brk(imm);
2162 }
2163
2164 void nop()
2165 {
2166 m_assembler.nop();
2167 }
2168
2169
2170 // Misc helper functions.
2171
2172 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
2173 static RelationalCondition invert(RelationalCondition cond)
2174 {
2175 return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
2176 }
2177
2178 static FunctionPtr readCallTarget(CodeLocationCall call)
2179 {
2180 return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
2181 }
2182
2183 static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
2184 {
2185 ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
2186 }
2187
2188 static ptrdiff_t maxJumpReplacementSize()
2189 {
2190 return ARM64Assembler::maxJumpReplacementSize();
2191 }
2192
2193 static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
2194
2195 static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
2196 {
2197 return label.labelAtOffset(0);
2198 }
2199
2200 static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
2201 {
2202 UNREACHABLE_FOR_PLATFORM();
2203 return CodeLocationLabel();
2204 }
2205
2206 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
2207 {
2208 reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
2209 }
2210
2211 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
2212 {
2213 UNREACHABLE_FOR_PLATFORM();
2214 }
2215
2216 protected:
2217 ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
2218 {
2219 m_assembler.b_cond(cond);
2220 AssemblerLabel label = m_assembler.label();
2221 m_assembler.nop();
2222 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
2223 }
2224 ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
2225 ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
2226 ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
2227
2228 template <int dataSize>
2229 ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
2230 {
2231 if (cond == IsZero)
2232 m_assembler.cbz<dataSize>(reg);
2233 else
2234 m_assembler.cbnz<dataSize>(reg);
2235 AssemblerLabel label = m_assembler.label();
2236 m_assembler.nop();
2237 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
2238 }
2239
2240 ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
2241 {
2242 ASSERT(bit < 64);
2243 bit &= 0x3f;
2244 if (cond == IsZero)
2245 m_assembler.tbz(reg, bit);
2246 else
2247 m_assembler.tbnz(reg, bit);
2248 AssemblerLabel label = m_assembler.label();
2249 m_assembler.nop();
2250 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
2251 }
2252
2253 ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
2254 {
2255 return static_cast<ARM64Assembler::Condition>(cond);
2256 }
2257
2258 ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
2259 {
2260 return static_cast<ARM64Assembler::Condition>(cond);
2261 }
2262
2263 ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
2264 {
2265 return static_cast<ARM64Assembler::Condition>(cond);
2266 }
2267
2268 private:
2269 ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
2270 ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
2271
2272 ALWAYS_INLINE bool isInIntRange(intptr_t value)
2273 {
2274 return value == ((value << 32) >> 32);
2275 }
2276
2277 template<typename ImmediateType, typename rawType>
2278 void moveInternal(ImmediateType imm, RegisterID dest)
2279 {
2280 const int dataSize = sizeof(rawType)*8;
2281 const int numberHalfWords = dataSize/16;
2282 rawType value = bitwise_cast<rawType>(imm.m_value);
2283 uint16_t halfword[numberHalfWords];
2284
2285 // Handle 0 and ~0 here to simplify code below
2286 if (!value) {
2287 m_assembler.movz<dataSize>(dest, 0);
2288 return;
2289 }
2290 if (!~value) {
2291 m_assembler.movn<dataSize>(dest, 0);
2292 return;
2293 }
2294
2295 LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
2296
2297 if (logicalImm.isValid()) {
2298 m_assembler.movi<dataSize>(dest, logicalImm);
2299 return;
2300 }
2301
2302 // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
2303 int zeroOrNegateVote = 0;
2304 for (int i = 0; i < numberHalfWords; ++i) {
2305 halfword[i] = getHalfword(value, i);
2306 if (!halfword[i])
2307 zeroOrNegateVote++;
2308 else if (halfword[i] == 0xffff)
2309 zeroOrNegateVote--;
2310 }
2311
2312 bool needToClearRegister = true;
2313 if (zeroOrNegateVote >= 0) {
2314 for (int i = 0; i < numberHalfWords; i++) {
2315 if (halfword[i]) {
2316 if (needToClearRegister) {
2317 m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
2318 needToClearRegister = false;
2319 } else
2320 m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
2321 }
2322 }
2323 } else {
2324 for (int i = 0; i < numberHalfWords; i++) {
2325 if (halfword[i] != 0xffff) {
2326 if (needToClearRegister) {
2327 m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
2328 needToClearRegister = false;
2329 } else
2330 m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
2331 }
2332 }
2333 }
2334 }
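// Worked example of the voting scheme above (editorial note): materializing
// 0x0000ffff00001234 splits into halfwords [0x1234, 0x0000, 0xffff, 0x0000].
// Two halfwords are zero and one is 0xffff, so the vote is non-negative and
// the movz path is taken, emitting just two instructions:
//
//     movz dest, #0x1234
//     movk dest, #0xffff, lsl #32
//
// Had most halfwords been 0xffff, the movn path would start from all-ones
// instead, again writing only the halfwords that differ.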
2335
2336 template<int datasize>
2337 ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
2338 {
2339 m_assembler.ldr<datasize>(rt, rn, pimm);
2340 }
2341
2342 template<int datasize>
2343 ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
2344 {
2345 m_assembler.ldur<datasize>(rt, rn, simm);
2346 }
2347
2348 template<int datasize>
2349 ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
2350 {
2351 m_assembler.str<datasize>(rt, rn, pimm);
2352 }
2353
2354 template<int datasize>
2355 ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
2356 {
2357 m_assembler.stur<datasize>(rt, rn, simm);
2358 }
2359
2360 void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
2361 {
2362 int32_t value = imm.m_value;
2363 m_assembler.movz<32>(dest, getHalfword(value, 0));
2364 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2365 }
2366
2367 void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
2368 {
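// Only three halfwords (bits 0..47) are emitted: pointers are assumed to
// fit in 48 bits, which keeps the patchable pointer and call sequences at
// a fixed width.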
2369 intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
2370 m_assembler.movz<64>(dest, getHalfword(value, 0));
2371 m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
2372 m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
2373 }
2374
2375 void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
2376 {
2377 if (value >= 0) {
2378 m_assembler.movz<32>(dest, getHalfword(value, 0));
2379 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2380 } else {
2381 m_assembler.movn<32>(dest, ~getHalfword(value, 0));
2382 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2383 }
2384 }
2385
2386 void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
2387 {
2388 move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
2389 }
2390
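// The absolute-address load/store helpers below exploit the cached memory
// temp register: if it is known to hold a nearby address, the access is
// done with a signed unscaled (ldur/stur) or scaled unsigned (ldr/str)
// offset from it; if only the low 16 bits of the address differ, a single
// movk refreshes the register; otherwise the full address is rematerialized.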
2391 template<int datasize>
2392 ALWAYS_INLINE void load(const void* address, RegisterID dest)
2393 {
2394 intptr_t currentRegisterContents;
2395 if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2396 intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2397 intptr_t addressDelta = addressAsInt - currentRegisterContents;
2398
2399 if (isInIntRange(addressDelta)) {
2400 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2401 m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
2402 return;
2403 }
2404
2405 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2406 m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
2407 return;
2408 }
2409 }
2410
2411 if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2412 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2413 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2414 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2415 return;
2416 }
2417 }
2418
2419 move(TrustedImmPtr(address), memoryTempRegister);
2420 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2421 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2422 }
2423
2424 template<int datasize>
2425 ALWAYS_INLINE void store(RegisterID src, const void* address)
2426 {
2427 intptr_t currentRegisterContents;
2428 if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2429 intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2430 intptr_t addressDelta = addressAsInt - currentRegisterContents;
2431
2432 if (isInIntRange(addressDelta)) {
2433 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2434 m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
2435 return;
2436 }
2437
2438 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2439 m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
2440 return;
2441 }
2442 }
2443
2444 if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2445 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2446 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2447 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
2448 return;
2449 }
2450 }
2451
2452 move(TrustedImmPtr(address), memoryTempRegister);
2453 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2454 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
2455 }
2456
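// Cheapest-first immediate materialization into a cached temp register
// (editorial note): emit nothing if the register already holds the value;
// else a single logical-immediate move if the bit pattern encodes; else
// movk just the changed halfwords when the upper 32 bits match; else the
// caller falls back to a full moveInternal().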
2457 template <int dataSize>
2458 ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
2459 {
2460 intptr_t currentRegisterContents;
2461 if (dest.value(currentRegisterContents)) {
2462 if (currentRegisterContents == immediate)
2463 return true;
2464
2465 LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
2466
2467 if (logicalImm.isValid()) {
2468 m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
2469 dest.setValue(immediate);
2470 return true;
2471 }
2472
2473 if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
2474 if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
2475 m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
2476
2477 if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
2478 m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
2479
2480 dest.setValue(immediate);
2481 return true;
2482 }
2483 }
2484
2485 return false;
2486 }
2487
2488 void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
2489 {
2490 if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
2491 return;
2492
2493 moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
2494 dest.setValue(imm.m_value);
2495 }
2496
2497 void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
2498 {
2499 if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
2500 return;
2501
2502 moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
2503 dest.setValue(imm.asIntptr());
2504 }
2505
2506 void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
2507 {
2508 if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
2509 return;
2510
2511 moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
2512 dest.setValue(imm.m_value);
2513 }
2514
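// Offset-encoding note (editorial): ldur/stur take a 9-bit signed unscaled
// offset (-256..255), while ldr/str take a 12-bit unsigned offset scaled by
// the access size. The tryLoad/tryStore helpers below attempt both encodings
// and return false so the caller can materialize the offset in a register.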
2515 template<int datasize>
2516 ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2517 {
2518 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2519 loadUnscaledImmediate<datasize>(rt, rn, offset);
2520 return true;
2521 }
2522 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2523 loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2524 return true;
2525 }
2526 return false;
2527 }
2528
2529 template<int datasize>
2530 ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2531 {
2532 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2533 m_assembler.ldur<datasize>(rt, rn, offset);
2534 return true;
2535 }
2536 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2537 m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
2538 return true;
2539 }
2540 return false;
2541 }
2542
2543 template<int datasize>
2544 ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2545 {
2546 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2547 storeUnscaledImmediate<datasize>(rt, rn, offset);
2548 return true;
2549 }
2550 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2551 storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2552 return true;
2553 }
2554 return false;
2555 }
2556
2557 template<int datasize>
2558 ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2559 {
2560 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2561 m_assembler.stur<datasize>(rt, rn, offset);
2562 return true;
2563 }
2564 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2565 m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
2566 return true;
2567 }
2568 return false;
2569 }
2570
2571 friend class LinkBuffer;
2572 friend class RepatchBuffer;
2573
2574 static void linkCall(void* code, Call call, FunctionPtr function)
2575 {
2576 if (call.isFlagSet(Call::Near))
2577 ARM64Assembler::linkCall(code, call.m_label, function.value());
2578 else
2579 ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
2580 }
2581
2582 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
2583 {
2584 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2585 }
2586
2587 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
2588 {
2589 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2590 }
2591
2592 CachedTempRegister m_dataMemoryTempRegister;
2593 CachedTempRegister m_cachedMemoryTempRegister;
2594 bool m_makeJumpPatchable;
2595 };
2596
2597 // Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
2598 template<>
2599 ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
2600 {
2601 m_assembler.ldrb(rt, rn, pimm);
2602 }
2603
2604 template<>
2605 ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
2606 {
2607 m_assembler.ldrh(rt, rn, pimm);
2608 }
2609
2610 template<>
2611 ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
2612 {
2613 m_assembler.ldurb(rt, rn, simm);
2614 }
2615
2616 template<>
2617 ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
2618 {
2619 m_assembler.ldurh(rt, rn, simm);
2620 }
2621
2622 template<>
2623 ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
2624 {
2625 m_assembler.strb(rt, rn, pimm);
2626 }
2627
2628 template<>
2629 ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
2630 {
2631 m_assembler.strh(rt, rn, pimm);
2632 }
2633
2634 template<>
2635 ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
2636 {
2637 m_assembler.sturb(rt, rn, simm);
2638 }
2639
2640 template<>
2641 ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
2642 {
2643 m_assembler.sturh(rt, rn, simm);
2644 }
2645
2646 } // namespace JSC
2647
2648 #endif // ENABLE(ASSEMBLER)
2649
2650 #endif // MacroAssemblerARM64_h