]> git.saurik.com Git - apple/javascriptcore.git/blame - assembler/MacroAssemblerARM64.h
JavaScriptCore-1218.33.tar.gz
[apple/javascriptcore.git] / assembler / MacroAssemblerARM64.h
CommitLineData
93a37866
A
1/*
2 * Copyright (C) 2012 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef MacroAssemblerARM64_h
27#define MacroAssemblerARM64_h
28
29#if ENABLE(ASSEMBLER)
30
31#include "ARM64Assembler.h"
32#include "AbstractMacroAssembler.h"
33#include <wtf/MathExtras.h>
34
35namespace JSC {
36
37class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
38 static const RegisterID dataTempRegister = ARM64Registers::ip0;
39 static const RegisterID memoryTempRegister = ARM64Registers::ip1;
40 static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
41 static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
42 static const intptr_t maskHalfWord0 = 0xffffl;
43 static const intptr_t maskHalfWord1 = 0xffff0000l;
44 static const intptr_t maskUpperWord = 0xffffffff00000000l;
45
46 // 4 instructions - 3 to load the function pointer, + blr.
47 static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
48
49public:
50 MacroAssemblerARM64()
51 : m_dataMemoryTempRegister(this, dataTempRegister)
52 , m_cachedMemoryTempRegister(this, memoryTempRegister)
53 , m_makeJumpPatchable(false)
54 {
55 }
56
57 typedef ARM64Registers::FPRegisterID FPRegisterID;
58 typedef ARM64Assembler::LinkRecord LinkRecord;
59 typedef ARM64Assembler::JumpType JumpType;
60 typedef ARM64Assembler::JumpLinkType JumpLinkType;
61 typedef ARM64Assembler::Condition Condition;
62
63 static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
64 static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;
65
66 Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
67 void* unlinkedCode() { return m_assembler.unlinkedCode(); }
68 bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
69 JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
70 JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
71 void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
72 int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
73 void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
74 int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }
75
76 static const Scale ScalePtr = TimesEight;
77
78 static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
79 {
80 // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
81 return !(value & ~0x3ff8);
82 }
83
84 enum RelationalCondition {
85 Equal = ARM64Assembler::ConditionEQ,
86 NotEqual = ARM64Assembler::ConditionNE,
87 Above = ARM64Assembler::ConditionHI,
88 AboveOrEqual = ARM64Assembler::ConditionHS,
89 Below = ARM64Assembler::ConditionLO,
90 BelowOrEqual = ARM64Assembler::ConditionLS,
91 GreaterThan = ARM64Assembler::ConditionGT,
92 GreaterThanOrEqual = ARM64Assembler::ConditionGE,
93 LessThan = ARM64Assembler::ConditionLT,
94 LessThanOrEqual = ARM64Assembler::ConditionLE
95 };
96
97 enum ResultCondition {
98 Overflow = ARM64Assembler::ConditionVS,
99 Signed = ARM64Assembler::ConditionMI,
100 PositiveOrZero = ARM64Assembler::ConditionPL,
101 Zero = ARM64Assembler::ConditionEQ,
102 NonZero = ARM64Assembler::ConditionNE
103 };
104
105 enum ZeroCondition {
106 IsZero = ARM64Assembler::ConditionEQ,
107 IsNonZero = ARM64Assembler::ConditionNE
108 };
109
110 enum DoubleCondition {
111 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
112 DoubleEqual = ARM64Assembler::ConditionEQ,
113 DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
114 DoubleGreaterThan = ARM64Assembler::ConditionGT,
115 DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
116 DoubleLessThan = ARM64Assembler::ConditionLO,
117 DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
118 // If either operand is NaN, these conditions always evaluate to true.
119 DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
120 DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
121 DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
122 DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
123 DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
124 DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
125 };
126
127 static const RegisterID stackPointerRegister = ARM64Registers::sp;
128 static const RegisterID linkRegister = ARM64Registers::lr;
129
130
131 // Integer operations:
132
133 void add32(RegisterID src, RegisterID dest)
134 {
135 m_assembler.add<32>(dest, dest, src);
136 }
137
138 void add32(TrustedImm32 imm, RegisterID dest)
139 {
140 add32(imm, dest, dest);
141 }
142
143 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
144 {
145 if (isUInt12(imm.m_value))
146 m_assembler.add<32>(dest, src, UInt12(imm.m_value));
147 else if (isUInt12(-imm.m_value))
148 m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
149 else {
150 move(imm, getCachedDataTempRegisterIDAndInvalidate());
151 m_assembler.add<32>(dest, src, dataTempRegister);
152 }
153 }
154
155 void add32(TrustedImm32 imm, Address address)
156 {
157 load32(address, getCachedDataTempRegisterIDAndInvalidate());
158
159 if (isUInt12(imm.m_value))
160 m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
161 else if (isUInt12(-imm.m_value))
162 m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
163 else {
164 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
165 m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
166 }
167
168 store32(dataTempRegister, address);
169 }
170
171 void add32(TrustedImm32 imm, AbsoluteAddress address)
172 {
173 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
174
175 if (isUInt12(imm.m_value)) {
176 m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
177 store32(dataTempRegister, address.m_ptr);
178 return;
179 }
180
181 if (isUInt12(-imm.m_value)) {
182 m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
183 store32(dataTempRegister, address.m_ptr);
184 return;
185 }
186
187 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
188 m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
189 store32(dataTempRegister, address.m_ptr);
190 }
191
192 void add32(Address src, RegisterID dest)
193 {
194 load32(src, getCachedDataTempRegisterIDAndInvalidate());
195 add32(dataTempRegister, dest);
196 }
197
198 void add64(RegisterID src, RegisterID dest)
199 {
200 m_assembler.add<64>(dest, dest, src);
201 }
202
203 void add64(TrustedImm32 imm, RegisterID dest)
204 {
205 if (isUInt12(imm.m_value)) {
206 m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
207 return;
208 }
209 if (isUInt12(-imm.m_value)) {
210 m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
211 return;
212 }
213
214 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
215 m_assembler.add<64>(dest, dest, dataTempRegister);
216 }
217
218 void add64(TrustedImm64 imm, RegisterID dest)
219 {
220 intptr_t immediate = imm.m_value;
221
222 if (isUInt12(immediate)) {
223 m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
224 return;
225 }
226 if (isUInt12(-immediate)) {
227 m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
228 return;
229 }
230
231 move(imm, getCachedDataTempRegisterIDAndInvalidate());
232 m_assembler.add<64>(dest, dest, dataTempRegister);
233 }
234
235 void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
236 {
237 if (isUInt12(imm.m_value)) {
238 m_assembler.add<64>(dest, src, UInt12(imm.m_value));
239 return;
240 }
241 if (isUInt12(-imm.m_value)) {
242 m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
243 return;
244 }
245
246 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
247 m_assembler.add<64>(dest, src, dataTempRegister);
248 }
249
250 void add64(TrustedImm32 imm, Address address)
251 {
252 load64(address, getCachedDataTempRegisterIDAndInvalidate());
253
254 if (isUInt12(imm.m_value))
255 m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
256 else if (isUInt12(-imm.m_value))
257 m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
258 else {
259 signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
260 m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
261 }
262
263 store64(dataTempRegister, address);
264 }
265
266 void add64(TrustedImm32 imm, AbsoluteAddress address)
267 {
268 load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
269
270 if (isUInt12(imm.m_value)) {
271 m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
272 store64(dataTempRegister, address.m_ptr);
273 return;
274 }
275
276 if (isUInt12(-imm.m_value)) {
277 m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
278 store64(dataTempRegister, address.m_ptr);
279 return;
280 }
281
282 signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
283 m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
284 store64(dataTempRegister, address.m_ptr);
285 }
286
287 void add64(Address src, RegisterID dest)
288 {
289 load64(src, getCachedDataTempRegisterIDAndInvalidate());
290 m_assembler.add<64>(dest, dest, dataTempRegister);
291 }
292
293 void add64(AbsoluteAddress src, RegisterID dest)
294 {
295 load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
296 m_assembler.add<64>(dest, dest, dataTempRegister);
297 }
298
299 void and32(RegisterID src, RegisterID dest)
300 {
301 and32(dest, src, dest);
302 }
303
304 void and32(RegisterID op1, RegisterID op2, RegisterID dest)
305 {
306 m_assembler.and_<32>(dest, op1, op2);
307 }
308
309 void and32(TrustedImm32 imm, RegisterID dest)
310 {
311 and32(imm, dest, dest);
312 }
313
314 void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
315 {
316 LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
317
318 if (logicalImm.isValid()) {
319 m_assembler.and_<32>(dest, src, logicalImm);
320 return;
321 }
322
323 move(imm, getCachedDataTempRegisterIDAndInvalidate());
324 m_assembler.and_<32>(dest, src, dataTempRegister);
325 }
326
327 void and32(Address src, RegisterID dest)
328 {
329 load32(src, dataTempRegister);
330 and32(dataTempRegister, dest);
331 }
332
333 void and64(RegisterID src, RegisterID dest)
334 {
335 m_assembler.and_<64>(dest, dest, src);
336 }
337
338 void and64(TrustedImm32 imm, RegisterID dest)
339 {
340 LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
341
342 if (logicalImm.isValid()) {
343 m_assembler.and_<64>(dest, dest, logicalImm);
344 return;
345 }
346
347 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
348 m_assembler.and_<64>(dest, dest, dataTempRegister);
349 }
350
351 void countLeadingZeros32(RegisterID src, RegisterID dest)
352 {
353 m_assembler.clz<32>(dest, src);
354 }
355
356 void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
357 {
358 m_assembler.lsl<32>(dest, src, shiftAmount);
359 }
360
361 void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
362 {
363 m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
364 }
365
366 void lshift32(RegisterID shiftAmount, RegisterID dest)
367 {
368 lshift32(dest, shiftAmount, dest);
369 }
370
371 void lshift32(TrustedImm32 imm, RegisterID dest)
372 {
373 lshift32(dest, imm, dest);
374 }
375
376 void mul32(RegisterID src, RegisterID dest)
377 {
378 m_assembler.mul<32>(dest, dest, src);
379 }
380
381 void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
382 {
383 move(imm, getCachedDataTempRegisterIDAndInvalidate());
384 m_assembler.mul<32>(dest, src, dataTempRegister);
385 }
386
387 void neg32(RegisterID dest)
388 {
389 m_assembler.neg<32>(dest, dest);
390 }
391
392 void neg64(RegisterID dest)
393 {
394 m_assembler.neg<64>(dest, dest);
395 }
396
397 void or32(RegisterID src, RegisterID dest)
398 {
399 or32(dest, src, dest);
400 }
401
402 void or32(RegisterID op1, RegisterID op2, RegisterID dest)
403 {
404 m_assembler.orr<32>(dest, op1, op2);
405 }
406
407 void or32(TrustedImm32 imm, RegisterID dest)
408 {
409 or32(imm, dest, dest);
410 }
411
412 void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
413 {
414 LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
415
416 if (logicalImm.isValid()) {
417 m_assembler.orr<32>(dest, src, logicalImm);
418 return;
419 }
420
421 move(imm, getCachedDataTempRegisterIDAndInvalidate());
422 m_assembler.orr<32>(dest, src, dataTempRegister);
423 }
424
425 void or32(RegisterID src, AbsoluteAddress address)
426 {
427 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
428 m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
429 store32(dataTempRegister, address.m_ptr);
430 }
431
432 void or64(RegisterID src, RegisterID dest)
433 {
434 or64(dest, src, dest);
435 }
436
437 void or64(RegisterID op1, RegisterID op2, RegisterID dest)
438 {
439 m_assembler.orr<64>(dest, op1, op2);
440 }
441
442 void or64(TrustedImm32 imm, RegisterID dest)
443 {
444 or64(imm, dest, dest);
445 }
446
447 void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
448 {
449 LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
450
451 if (logicalImm.isValid()) {
452 m_assembler.orr<64>(dest, dest, logicalImm);
453 return;
454 }
455
456 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
457 m_assembler.orr<64>(dest, src, dataTempRegister);
458 }
459
460 void or64(TrustedImm64 imm, RegisterID dest)
461 {
462 LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
463
464 if (logicalImm.isValid()) {
465 m_assembler.orr<64>(dest, dest, logicalImm);
466 return;
467 }
468
469 move(imm, getCachedDataTempRegisterIDAndInvalidate());
470 m_assembler.orr<64>(dest, dest, dataTempRegister);
471 }
472
473 void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
474 {
475 m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
476 }
477
478 void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
479 {
480 m_assembler.asr<32>(dest, src, shiftAmount);
481 }
482
483 void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
484 {
485 m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
486 }
487
488 void rshift32(RegisterID shiftAmount, RegisterID dest)
489 {
490 rshift32(dest, shiftAmount, dest);
491 }
492
493 void rshift32(TrustedImm32 imm, RegisterID dest)
494 {
495 rshift32(dest, imm, dest);
496 }
497
498 void sub32(RegisterID src, RegisterID dest)
499 {
500 m_assembler.sub<32>(dest, dest, src);
501 }
502
503 void sub32(TrustedImm32 imm, RegisterID dest)
504 {
505 if (isUInt12(imm.m_value)) {
506 m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
507 return;
508 }
509 if (isUInt12(-imm.m_value)) {
510 m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
511 return;
512 }
513
514 move(imm, getCachedDataTempRegisterIDAndInvalidate());
515 m_assembler.sub<32>(dest, dest, dataTempRegister);
516 }
517
518 void sub32(TrustedImm32 imm, Address address)
519 {
520 load32(address, getCachedDataTempRegisterIDAndInvalidate());
521
522 if (isUInt12(imm.m_value))
523 m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
524 else if (isUInt12(-imm.m_value))
525 m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
526 else {
527 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
528 m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
529 }
530
531 store32(dataTempRegister, address);
532 }
533
534 void sub32(TrustedImm32 imm, AbsoluteAddress address)
535 {
536 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
537
538 if (isUInt12(imm.m_value)) {
539 m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
540 store32(dataTempRegister, address.m_ptr);
541 return;
542 }
543
544 if (isUInt12(-imm.m_value)) {
545 m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
546 store32(dataTempRegister, address.m_ptr);
547 return;
548 }
549
550 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
551 m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
552 store32(dataTempRegister, address.m_ptr);
553 }
554
555 void sub32(Address src, RegisterID dest)
556 {
557 load32(src, getCachedDataTempRegisterIDAndInvalidate());
558 sub32(dataTempRegister, dest);
559 }
560
561 void sub64(RegisterID src, RegisterID dest)
562 {
563 m_assembler.sub<64>(dest, dest, src);
564 }
565
566 void sub64(TrustedImm32 imm, RegisterID dest)
567 {
568 if (isUInt12(imm.m_value)) {
569 m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
570 return;
571 }
572 if (isUInt12(-imm.m_value)) {
573 m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
574 return;
575 }
576
577 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
578 m_assembler.sub<64>(dest, dest, dataTempRegister);
579 }
580
581 void sub64(TrustedImm64 imm, RegisterID dest)
582 {
583 intptr_t immediate = imm.m_value;
584
585 if (isUInt12(immediate)) {
586 m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
587 return;
588 }
589 if (isUInt12(-immediate)) {
590 m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
591 return;
592 }
593
594 move(imm, getCachedDataTempRegisterIDAndInvalidate());
595 m_assembler.sub<64>(dest, dest, dataTempRegister);
596 }
597
598 void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
599 {
600 m_assembler.lsr<32>(dest, src, shiftAmount);
601 }
602
603 void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
604 {
605 m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
606 }
607
608 void urshift32(RegisterID shiftAmount, RegisterID dest)
609 {
610 urshift32(dest, shiftAmount, dest);
611 }
612
613 void urshift32(TrustedImm32 imm, RegisterID dest)
614 {
615 urshift32(dest, imm, dest);
616 }
617
618 void xor32(RegisterID src, RegisterID dest)
619 {
620 xor32(dest, src, dest);
621 }
622
623 void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
624 {
625 m_assembler.eor<32>(dest, op1, op2);
626 }
627
628 void xor32(TrustedImm32 imm, RegisterID dest)
629 {
630 xor32(imm, dest, dest);
631 }
632
633 void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
634 {
635 if (imm.m_value == -1)
636 m_assembler.mvn<32>(dest, src);
637 else {
638 LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
639
640 if (logicalImm.isValid()) {
641 m_assembler.eor<32>(dest, dest, logicalImm);
642 return;
643 }
644
645 move(imm, getCachedDataTempRegisterIDAndInvalidate());
646 m_assembler.eor<32>(dest, src, dataTempRegister);
647 }
648 }
649
650 void xor64(RegisterID src, Address address)
651 {
652 load64(address, getCachedDataTempRegisterIDAndInvalidate());
653 m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
654 store64(dataTempRegister, address);
655 }
656
657 void xor64(RegisterID src, RegisterID dest)
658 {
659 xor64(dest, src, dest);
660 }
661
662 void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
663 {
664 m_assembler.eor<64>(dest, op1, op2);
665 }
666
667 void xor64(TrustedImm32 imm, RegisterID dest)
668 {
669 xor64(imm, dest, dest);
670 }
671
672 void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
673 {
674 if (imm.m_value == -1)
675 m_assembler.mvn<64>(dest, src);
676 else {
677 LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
678
679 if (logicalImm.isValid()) {
680 m_assembler.eor<64>(dest, dest, logicalImm);
681 return;
682 }
683
684 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
685 m_assembler.eor<64>(dest, src, dataTempRegister);
686 }
687 }
688
689
690 // Memory access operations:
691
692 void load64(ImplicitAddress address, RegisterID dest)
693 {
694 if (tryLoadWithOffset<64>(dest, address.base, address.offset))
695 return;
696
697 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
698 m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
699 }
700
701 void load64(BaseIndex address, RegisterID dest)
702 {
703 if (!address.offset && (!address.scale || address.scale == 3)) {
704 m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
705 return;
706 }
707
708 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
709 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
710 m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
711 }
712
713 void load64(const void* address, RegisterID dest)
714 {
715 load<64>(address, dest);
716 }
717
718 DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
719 {
720 DataLabel32 label(this);
721 signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
722 m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
723 return label;
724 }
725
726 DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
727 {
728 ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
729 DataLabelCompact label(this);
730 m_assembler.ldr<64>(dest, address.base, address.offset);
731 return label;
732 }
733
734 ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
735 {
736 ConvertibleLoadLabel result(this);
737 ASSERT(!(address.offset & ~0xff8));
738 m_assembler.ldr<64>(dest, address.base, address.offset);
739 return result;
740 }
741
742 void load32(ImplicitAddress address, RegisterID dest)
743 {
744 if (tryLoadWithOffset<32>(dest, address.base, address.offset))
745 return;
746
747 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
748 m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
749 }
750
751 void load32(BaseIndex address, RegisterID dest)
752 {
753 if (!address.offset && (!address.scale || address.scale == 2)) {
754 m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
755 return;
756 }
757
758 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
759 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
760 m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
761 }
762
763 void load32(const void* address, RegisterID dest)
764 {
765 load<32>(address, dest);
766 }
767
768 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
769 {
770 DataLabel32 label(this);
771 signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
772 m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
773 return label;
774 }
775
776 DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
777 {
778 ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
779 DataLabelCompact label(this);
780 m_assembler.ldr<32>(dest, address.base, address.offset);
781 return label;
782 }
783
784 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
785 {
786 load32(address, dest);
787 }
788
789 void load16(ImplicitAddress address, RegisterID dest)
790 {
791 if (tryLoadWithOffset<16>(dest, address.base, address.offset))
792 return;
793
794 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
795 m_assembler.ldrh(dest, address.base, memoryTempRegister);
796 }
797
798 void load16(BaseIndex address, RegisterID dest)
799 {
800 if (!address.offset && (!address.scale || address.scale == 1)) {
801 m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
802 return;
803 }
804
805 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
806 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
807 m_assembler.ldrh(dest, address.base, memoryTempRegister);
808 }
809
810 void load16Unaligned(BaseIndex address, RegisterID dest)
811 {
812 load16(address, dest);
813 }
814
815 void load16Signed(BaseIndex address, RegisterID dest)
816 {
817 if (!address.offset && (!address.scale || address.scale == 1)) {
818 m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
819 return;
820 }
821
822 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
823 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
824 m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
825 }
826
827 void load8(ImplicitAddress address, RegisterID dest)
828 {
829 if (tryLoadWithOffset<8>(dest, address.base, address.offset))
830 return;
831
832 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
833 m_assembler.ldrb(dest, address.base, memoryTempRegister);
834 }
835
836 void load8(BaseIndex address, RegisterID dest)
837 {
838 if (!address.offset && !address.scale) {
839 m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
840 return;
841 }
842
843 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
844 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
845 m_assembler.ldrb(dest, address.base, memoryTempRegister);
846 }
847
848 void load8(const void* address, RegisterID dest)
849 {
850 moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
851 m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
12899fa2
A
852 if (dest == memoryTempRegister)
853 m_cachedMemoryTempRegister.invalidate();
93a37866
A
854 }
855
856 void load8Signed(BaseIndex address, RegisterID dest)
857 {
858 if (!address.offset && !address.scale) {
859 m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
860 return;
861 }
862
863 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
864 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
865 m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
866 }
867
868 void store64(RegisterID src, ImplicitAddress address)
869 {
870 if (tryStoreWithOffset<64>(src, address.base, address.offset))
871 return;
872
873 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
874 m_assembler.str<64>(src, address.base, memoryTempRegister);
875 }
876
877 void store64(RegisterID src, BaseIndex address)
878 {
879 if (!address.offset && (!address.scale || address.scale == 3)) {
880 m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
881 return;
882 }
883
884 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
885 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
886 m_assembler.str<64>(src, address.base, memoryTempRegister);
887 }
888
889 void store64(RegisterID src, const void* address)
890 {
891 store<64>(src, address);
892 }
893
894 void store64(TrustedImm64 imm, ImplicitAddress address)
895 {
896 if (!imm.m_value) {
897 store64(ARM64Registers::zr, address);
898 return;
899 }
900
901 moveToCachedReg(imm, m_dataMemoryTempRegister);
902 store64(dataTempRegister, address);
903 }
904
905 void store64(TrustedImm64 imm, BaseIndex address)
906 {
907 if (!imm.m_value) {
908 store64(ARM64Registers::zr, address);
909 return;
910 }
911
912 moveToCachedReg(imm, m_dataMemoryTempRegister);
913 store64(dataTempRegister, address);
914 }
915
916 DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
917 {
918 DataLabel32 label(this);
919 signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
920 m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
921 return label;
922 }
923
924 void store32(RegisterID src, ImplicitAddress address)
925 {
926 if (tryStoreWithOffset<32>(src, address.base, address.offset))
927 return;
928
929 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
930 m_assembler.str<32>(src, address.base, memoryTempRegister);
931 }
932
933 void store32(RegisterID src, BaseIndex address)
934 {
935 if (!address.offset && (!address.scale || address.scale == 2)) {
936 m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
937 return;
938 }
939
940 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
941 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
942 m_assembler.str<32>(src, address.base, memoryTempRegister);
943 }
944
945 void store32(RegisterID src, const void* address)
946 {
947 store<32>(src, address);
948 }
949
    // Immediate store32 overloads. A zero immediate is stored straight from
    // the zero register (no materialization); any other value is moved into
    // the cached data temp register first, then stored via the register form.

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }
982
    // 32-bit analogue of store64WithAddressOffsetPatch: emits a fixed-width,
    // later-repatchable offset move followed by a sign-extending (SXTW)
    // register-offset store. Returns the label of the patchable immediate.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
990
    // Store the low 16 bits of src to [base + (index << scale) + offset].
    // Scale 0 or 1 (halfword scaling) is encodable directly in strh;
    // anything else goes through memoryTempRegister.
    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }
1002
    // Store the low byte of src to [base + (index << scale) + offset].
    // Only scale 0 is directly encodable for byte stores; otherwise the
    // effective index is computed into memoryTempRegister.
    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }
1014
    // Store the low byte of src to an absolute address: materialize the
    // pointer in the memory temp, then strb with a zero immediate offset.
    void store8(RegisterID src, void* address)
    {
        move(ImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    // Immediate byte store: zero goes straight from the zero register,
    // other values via the data temp register.
    void store8(TrustedImm32 imm, void* address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }
1031
1032
1033 // Floating-point operations:
1034
    // ARM64 always has a full IEEE-754 FPU, so every floating-point
    // capability query answers true unconditionally.
    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    // Selects whether a truncate-branch jumps on failure or on success.
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1041
    // dest = |src|
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    // dest += src
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    // dest = op1 + op2
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    // dest += [src]; uses fpTempRegister as the load destination.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest += *address.m_ptr
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }
1068
    // dest = ceil(src) — round toward +infinity (frintp).
    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    // dest = floor(src) — round toward -infinity (frintm).
    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }
1078
1079 // Convert 'src' to an integer, and places the resulting 'dest'.
1080 // If the result is not representable as a 32 bit value, branch.
1081 // May also branch for some values that are representable in 32 bits
1082 // (specifically, in this case, 0).
    // Convert 'src' to an integer, and place the result in 'dest'.
    // If the result is not representable as a 32-bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0 — so -0.0 is not silently accepted).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        // fcvtns: float -> int32, round to nearest with ties to even.
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }
1095
    // Compare two doubles and branch on 'cond'. NaN operands set the ARM
    // "unordered" flags (V set), so the two conditions whose native encoding
    // disagrees with IEEE semantics are fixed up with an explicit VS check.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump result = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }
1118
    // Branch if reg != 0.0 and is not NaN (compare against literal zero;
    // the VS guard keeps unordered results from taking the NE branch).
    // The unused FPRegisterID parameter exists for cross-port API parity.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    // Branch if reg == 0.0 or reg is NaN.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }
1139
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check that the low 32 bits sign-extend to be equal to the full value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer directly into dest (signed convert is
        // sufficient: any value fitting in uint32 survives the check below).
        m_assembler.fcvtzs<64, 64>(dest, src);
        // Check that the low 32 bits zero-extend to be equal to the full value.
        m_assembler.cmp<64>(dest, dest, ARM64Assembler::UXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }
1158
    // Narrow a 64-bit double to a 32-bit float.
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    // Widen a 32-bit float to a 64-bit double.
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }

    // Materialize an int32 immediate and convert it to double.
    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }
1174
    // Signed int32 -> double (scvtf).
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    // Load an int32 from memory, then convert.
    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    // Load an int32 from an absolute address, then convert.
    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }
1191
    // dest /= src (note operand order: dest is the dividend).
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    // dest = op1 / op2
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }
1201
    // Load a double from [base + offset]; fall back to a materialized
    // register offset when the immediate does not encode.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // Load a double from [base + (index << scale) + offset]; scale 0 or 3
    // (doubleword scaling) is encodable directly in the ldr.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // Load a double from an absolute address. The pointer is cached in the
    // memory temp; the zero register supplies a zero index.
    void loadDouble(const void* address, FPRegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }
1228
    // Load a 32-bit float from [base + (index << scale) + offset]; scale 0
    // or 2 (word scaling) is encodable directly.
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }
1240
    // FP register -> FP register copy.
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    // Bit-copy a double's 64-bit pattern into a general-purpose register.
    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    // Bit-copy a general-purpose register into an FP register.
    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }
1255
    // dest *= src
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }

    // dest = op1 * op2
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }

    // dest *= [src]; loads through fpTempRegister.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }
1271
    // dest = -src
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }

    // dest = sqrt(src)
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }
1281
    // Store a double to [base + offset]; register-offset fallback when the
    // immediate offset does not encode.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // Store a double to an absolute address (pointer cached in memory temp,
    // zero register as index).
    void storeDouble(FPRegisterID src, const void* address)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }

    // Store a double to [base + (index << scale) + offset]; scale 0 or 3
    // is encodable directly in the str.
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }
1308
    // Store a 32-bit float to [base + (index << scale) + offset]; scale 0
    // or 2 is encodable directly.
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }
1320
    // dest -= src
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    // dest = op1 - op2
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }

    // dest -= [src]; loads through fpTempRegister.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }
1336
    // Result is undefined if the value is outside of the integer range.
    // Signed truncation toward zero (fcvtzs).
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }

    // Unsigned truncation toward zero (fcvtzu); result undefined out of range.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }
1347
1348
1349 // Stack manipulation operations:
1350 //
1351 // The ABI is assumed to provide a stack abstraction to memory,
1352 // containing machine word sized units of data. Push and pop
1353 // operations add and remove a single register sized unit of data
1354 // to or from the stack. These operations are not supported on
1355 // ARM64. Peek and poke operations read or write values on the
1356 // stack, without moving the current stack position. Additionally,
1357 // there are popToRestore and pushToSave operations, which are
1358 // designed just for quick-and-dirty saving and restoring of
1359 // temporary values. These operations don't claim to have any
1360 // ABI compatibility.
1361
    // Single-register push/pop are deliberately unsupported on ARM64 (the
    // ABI requires 16-byte sp alignment); callers must use pushToSave /
    // popToRestore or pair-based sequences instead. These stubs crash.
    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }
1381
    // Quick-and-dirty save/restore with no ABI guarantees. Each value
    // occupies a full 16-byte slot so sp stays 16-byte aligned.

    // Pop a GPR: load then post-increment sp by 16.
    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }

    // Push a GPR: pre-decrement sp by 16 then store.
    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }

    // Push a 32-bit value loaded from memory (zero-extended by load32).
    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    // Push a 32-bit immediate.
    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    // Pop a double from the 16-byte slot at sp.
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }

    // Push a double into a fresh 16-byte slot.
    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
1415
1416
1417 // Register move operations:
1418
    // Register move; elided when src == dest.
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }

    // Immediate moves all funnel through the width-templated moveInternal,
    // which picks the shortest movz/movn/movk sequence.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }

    // Exchange two registers via the data temp (which is invalidated).
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }
1446
    // Sign-extend the low 32 bits of src into a full 64-bit dest.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }

    // Zero-extend the low 32 bits of src into a full 64-bit dest.
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }
1456
1457
1458 // Forwards / external control flow operations:
1459 //
1460 // This set of jump and conditional branch operations return a Jump
1461 // object which may linked at a later point, allow forwards jump,
1462 // or jumps that will require external linkage (after the code has been
1463 // relocated).
1464 //
1465 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1466 // respecitvely, for unsigned comparisons the names b, a, be, and ae are
1467 // used (representing the names 'below' and 'above').
1468 //
1469 // Operands to the comparision are provided in the expected order, e.g.
1470 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1471 // treated as a signed 32bit value, is less than or equal to 5.
1472 //
1473 // jz and jnz test whether the first operand is equal to zero, and take
1474 // an optional second operand of a mask under which to perform the test.
1475
    // Compare two 32-bit registers and branch on cond.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<32>(left, right);
        return Jump(makeBranch(cond));
    }

    // Compare a register against an immediate. Encodable 12-bit immediates
    // use cmp; a negatable one uses cmn (cmp with the negated value);
    // otherwise the immediate is materialized in the data temp.
    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
1494
    // Memory-operand branch32 variants: load the memory side into the
    // memory temp register, then delegate to the register forms above.

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, left, memoryTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
1518
    // Absolute-address branch32 variants. The register-RHS form loads into
    // the data temp (the memory temp may be needed by the delegate); the
    // immediate-RHS form loads into the memory temp so the immediate
    // comparison can use the data temp.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
1530
    // Compare two 64-bit registers and branch on cond.
    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<64>(left, right);
        return Jump(makeBranch(cond));
    }

    // 64-bit register-vs-immediate compare: cmp/cmn for encodable 12-bit
    // immediates, otherwise materialize in the data temp.
    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        intptr_t immediate = right.m_value;
        if (isUInt12(immediate))
            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
        else if (isUInt12(-immediate))
            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
1550
    // Memory-operand branch64 variants: load the memory side into a temp
    // register, then delegate to the register forms.

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, left, memoryTempRegister);
    }

    // Absolute form uses the data temp because the delegated immediate /
    // register compare may itself consume the memory temp.
    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, dataTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }
1574
    // Byte compares: load8 zero-extends, so the immediate must already fit
    // in a byte (asserted), and the compare proceeds as a 32-bit branch.

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
1588
    // Branch on (reg & mask) under cond.
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<32>(reg, mask);
        return Jump(makeBranch(cond));
    }

    // Immediate-mask test, choosing the cheapest encoding:
    //  - mask == -1 with Zero/NonZero -> single cbz/cbnz;
    //  - single-bit mask with Zero/NonZero -> single tbz/tbnz;
    //  - mask expressible as a logical immediate -> tst with immediate;
    //  - otherwise materialize the mask and tst against a register.
    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<32>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<32>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Memory-operand variants: load into the memory temp, then test.
    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }
1630
    // Branch on (reg & mask) under cond, 64-bit.
    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<64>(reg, mask);
        return Jump(makeBranch(cond));
    }

    // 64-bit immediate-mask test; same encoding ladder as the 32-bit form
    // (cbz/cbnz, tbz/tbnz, logical-immediate tst, register tst). The 32-bit
    // mask is sign-extended before a register tst so -1 covers all 64 bits.
    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<64>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<64>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Memory-operand variants: load into the data temp, then test.
    Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }
1684
    // Byte tests: load8 zero-extends into the data temp, then reuse the
    // 32-bit test machinery.

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask);
    }

    // ExtendedAddress: the (pointer-sized) offset is materialized in the
    // data temp and used as the register offset of the byte load.
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        move(ImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
        return branchTest32(cond, dataTempRegister, mask);
    }
1703
    // ARM64 loads tolerate unaligned halfwords, so this is just branch32.
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }
1708
1709
1710 // Arithmetic control flow operations:
1711 //
1712 // This set of conditional branch operations branch based
1713 // on the result of an arithmetic operation. The operation
1714 // is performed as normal, storing the result.
1715 //
1716 // * jz operations branch if the result is zero.
1717 // * jo operations branch if the (signed) arithmetic
1718 // operation caused an overflow to occur.
1719
    // dest = op1 + op2, flag-setting (add<32, S>), then branch on cond.
    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    // Immediate add-and-branch: use flag-setting add/sub when the immediate
    // (or its negation) fits in 12 bits, else materialize in the data temp.
    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, op1, dataTempRegister, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, dest, src, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd32(cond, dest, imm, dest);
    }

    // Read-modify-write on an absolute address; the flag-setting add/sub
    // happens before the store, so the branch reflects the arithmetic, not
    // the store. The mask value lives in the data temp throughout.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
            store32(dataTempRegister, address.m_ptr);
        }

        return Jump(makeBranch(cond));
    }
1769
    // 64-bit flag-setting add-and-branch family; mirrors branchAdd32.
    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd64(cond, op1, dataTempRegister, dest);
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, dest, src, dest);
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, dest, imm, dest);
    }
1800
    // 32-bit multiply with branch. ARM64 multiplies do not set flags, so
    // Overflow is detected by doing a full 32x32->64 signed multiply and
    // checking that the high 33 bits are a pure sign extension of bit 31.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        if (cond != Overflow) {
            m_assembler.mul<32>(dest, src1, src2);
            return branchTest32(cond, dest);
        }

        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
        m_assembler.smull(dest, src1, src2);
        // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
        m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
        // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
        m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
        // After a mul32 the top 32 bits of the register should be clear.
        zeroExtend32ToPtr(dest, dest);
        // Check that bits 31..63 of the original result were all equal.
        return branch32(NotEqual, memoryTempRegister, dataTempRegister);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, dest, src, dest);
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchMul32(cond, dataTempRegister, src, dest);
    }
1832
    // dest = -dest with flags, branch on cond.
    Jump branchNeg32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }

    // Single-operand branchSub32: dest = 0 - dest (implemented as a
    // flag-setting negate), branch on cond.
    Jump branchSub32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }
1844
    // 32-bit flag-setting subtract-and-branch family. Immediates use sub,
    // or add with the negated value, when either fits in 12 bits.
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub32(cond, op1, dataTempRegister, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }
1875
    // 64-bit flag-setting subtract-and-branch family; mirrors branchSub32.
    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub64(cond, op1, dataTempRegister, dest);
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, dest, src, dest);
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, dest, imm, dest);
    }
1906
1907
1908 // Jumps, calls, returns
1909
// Linkable call: emit a fixed-width pointer load into the data temp register
// followed by blr, so the target pointer can be repatched later. The ASSERT
// checks the pointer-load site sits at the expected negative offset
// (REPATCH_OFFSET_CALL_TO_POINTER) from the label returned for the call.
1910 ALWAYS_INLINE Call call()
1911 {
1912 AssemblerLabel pointerLabel = m_assembler.label();
1913 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
1914 invalidateAllTempRegisters();
1915 m_assembler.blr(dataTempRegister);
1916 AssemblerLabel callLabel = m_assembler.label();
1917 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
1918 return Call(callLabel, Call::Linkable);
1919 }
1920
// Indirect call through a register. Temp register caches are invalidated
// because the callee may clobber ip0/ip1.
1921 ALWAYS_INLINE Call call(RegisterID target)
1922 {
1923 invalidateAllTempRegisters();
1924 m_assembler.blr(target);
1925 return Call(m_assembler.label(), Call::None);
1926 }
1927
// Call through a function pointer loaded from memory.
1928 ALWAYS_INLINE Call call(Address address)
1929 {
1930 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1931 return call(dataTempRegister);
1932 }
1933
// Unconditional jump; uses the fixed-size variant when a patchable jump was
// requested (see patchableJump()).
1934 ALWAYS_INLINE Jump jump()
1935 {
1936 AssemblerLabel label = m_assembler.label();
1937 m_assembler.b();
1938 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
1939 }
1940
// Indirect jump to the address held in a register.
1941 void jump(RegisterID target)
1942 {
1943 m_assembler.br(target);
1944 }
1945
// Indirect jump through a function pointer loaded from memory.
1946 void jump(Address address)
1947 {
1948 load64(address, getCachedDataTempRegisterIDAndInvalidate());
1949 m_assembler.br(dataTempRegister);
1950 }
1951
// Indirect jump through an absolute address: load the pointer stored at
// address.m_ptr, then branch to it.
1952 void jump(AbsoluteAddress address)
1953 {
1954 move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
1955 load64(Address(dataTempRegister), dataTempRegister);
1956 m_assembler.br(dataTempRegister);
1957 }
1958
// Bind the given jump to here and emit a tail call at that point.
1959 ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
1960 {
1961 oldJump.link(this);
1962 return tailRecursiveCall();
1963 }
1964
// PC-relative call (bl); target is filled in at link time.
1965 ALWAYS_INLINE Call nearCall()
1966 {
1967 m_assembler.bl();
1968 return Call(m_assembler.label(), Call::LinkableNear);
1969 }
1970
1971 ALWAYS_INLINE void ret()
1972 {
1973 m_assembler.ret();
1974 }
1975
// Tail call: same repatchable pointer-load sequence as call(), but ends in
// br rather than blr so no return address is pushed.
1976 ALWAYS_INLINE Call tailRecursiveCall()
1977 {
1978 // Like a normal call, but don't link.
1979 AssemblerLabel pointerLabel = m_assembler.label();
1980 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
1981 m_assembler.br(dataTempRegister);
1982 AssemblerLabel callLabel = m_assembler.label();
1983 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
1984 return Call(callLabel, Call::Linkable);
1985 }
1986
1987
1988 // Comparisons operations
1989
// Comparison helpers: compare the operands, then set dest to 0/1 via cset
// on the requested condition. Memory/immediate operands are first loaded
// into the temp registers.
1990 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1991 {
1992 m_assembler.cmp<32>(left, right);
1993 m_assembler.cset<32>(dest, ARM64Condition(cond));
1994 }
1995
1996 void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
1997 {
1998 load32(left, getCachedDataTempRegisterIDAndInvalidate());
1999 m_assembler.cmp<32>(dataTempRegister, right);
2000 m_assembler.cset<32>(dest, ARM64Condition(cond));
2001 }
2002
2003 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2004 {
2005 move(right, getCachedDataTempRegisterIDAndInvalidate());
2006 m_assembler.cmp<32>(left, dataTempRegister);
2007 m_assembler.cset<32>(dest, ARM64Condition(cond));
2008 }
2009
2010 void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2011 {
2012 m_assembler.cmp<64>(left, right);
2013 m_assembler.cset<32>(dest, ARM64Condition(cond));
2014 }
2015
// 64-bit compare against a 32-bit immediate: the immediate is sign-extended
// to pointer width before the comparison.
2016 void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2017 {
2018 signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
2019 m_assembler.cmp<64>(left, dataTempRegister);
2020 m_assembler.cset<32>(dest, ARM64Condition(cond));
2021 }
2022
// Byte compare: load8 zero-extends the byte, then compare as 32-bit values.
// Uses both temp registers (memory temp for the load, data temp for the
// immediate).
2023 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
2024 {
2025 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2026 move(right, getCachedDataTempRegisterIDAndInvalidate());
2027 compare32(cond, memoryTempRegister, dataTempRegister, dest);
2028 }
2029
// Bit-test helpers: tst the operand against the mask, then set dest to 0/1
// via cset. A mask of -1 means "test the value against itself" (i.e. is the
// value zero/non-zero), avoiding the immediate materialization.
2030 void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2031 {
2032 if (mask.m_value == -1)
2033 m_assembler.tst<32>(src, src);
2034 else {
2035 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2036 m_assembler.tst<32>(src, dataTempRegister);
2037 }
2038 m_assembler.cset<32>(dest, ARM64Condition(cond));
2039 }
2040
2041 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2042 {
2043 load32(address, getCachedDataTempRegisterIDAndInvalidate());
2044 test32(cond, dataTempRegister, mask, dest);
2045 }
2046
// Byte test: load8 zero-extends, then reuse the 32-bit path.
2047 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2048 {
2049 load8(address, getCachedDataTempRegisterIDAndInvalidate());
2050 test32(cond, dataTempRegister, mask, dest);
2051 }
2052
2053 void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2054 {
2055 m_assembler.tst<64>(op1, op2);
2056 m_assembler.cset<32>(dest, ARM64Condition(cond));
2057 }
2058
2059 void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2060 {
2061 if (mask.m_value == -1)
2062 m_assembler.tst<64>(src, src);
2063 else {
2064 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2065 m_assembler.tst<64>(src, dataTempRegister);
2066 }
2067 m_assembler.cset<32>(dest, ARM64Condition(cond));
2068 }
2069
2070
2071 // Patchable operations
2072
// Patchable moves: emit a fixed-width (always same instruction count)
// immediate load so the value can be rewritten in place later; the returned
// label marks the start of the instruction sequence.
2073 ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
2074 {
2075 DataLabel32 label(this);
2076 moveWithFixedWidth(imm, dest);
2077 return label;
2078 }
2079
2080 ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
2081 {
2082 DataLabelPtr label(this);
2083 moveWithFixedWidth(imm, dest);
2084 return label;
2085 }
2086
// Compare-against-patchable-pointer: loads the (initially placeholder)
// right-hand value with a patchable move, then branches on the comparison.
// dataLabel receives the location of the patchable pointer load.
2087 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2088 {
2089 dataLabel = DataLabelPtr(this);
2090 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2091 return branch64(cond, left, dataTempRegister);
2092 }
2093
2094 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2095 {
2096 dataLabel = DataLabelPtr(this);
2097 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2098 return branch64(cond, left, dataTempRegister);
2099 }
2100
// Patchable branch wrappers: temporarily set m_makeJumpPatchable so the
// underlying branch emitters (jump(), makeBranch(), makeCompareAndBranch(),
// makeTestBitAndBranch()) produce their fixed-size variants, which can be
// repatched after the code is finalized.
// NOTE(review): patchableBranchPtr compares via branch32 on a pointer-sized
// right value (TrustedImm32(right)) -- presumably relies on the value
// fitting in 32 bits; confirm against callers.
2101 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
2102 {
2103 m_makeJumpPatchable = true;
2104 Jump result = branch32(cond, left, TrustedImm32(right));
2105 m_makeJumpPatchable = false;
2106 return PatchableJump(result);
2107 }
2108
2109 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2110 {
2111 m_makeJumpPatchable = true;
2112 Jump result = branchTest32(cond, reg, mask);
2113 m_makeJumpPatchable = false;
2114 return PatchableJump(result);
2115 }
2116
2117 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
2118 {
2119 m_makeJumpPatchable = true;
2120 Jump result = branch32(cond, reg, imm);
2121 m_makeJumpPatchable = false;
2122 return PatchableJump(result);
2123 }
2124
2125 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2126 {
2127 m_makeJumpPatchable = true;
2128 Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
2129 m_makeJumpPatchable = false;
2130 return PatchableJump(result);
2131 }
2132
2133 PatchableJump patchableJump()
2134 {
2135 m_makeJumpPatchable = true;
2136 Jump result = jump();
2137 m_makeJumpPatchable = false;
2138 return PatchableJump(result);
2139 }
2140
// Store a repatchable pointer constant: fixed-width load of the value into
// the data temp register, then a 64-bit store. The label marks the
// patchable immediate-load site.
2141 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
2142 {
2143 DataLabelPtr label(this);
2144 moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
2145 store64(dataTempRegister, address);
2146 return label;
2147 }
2148
2149 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
2150 {
2151 return storePtrWithPatch(TrustedImmPtr(0), address);
2152 }
2153
// Rewrite an already-emitted patchable move (at 'address') to load 'value'
// into the data temp register; the trailing 'true' requests a flush/sync of
// the modified instructions (see ARM64Assembler::setPointer).
2154 static void reemitInitialMoveWithPatch(void* address, void* value)
2155 {
2156 ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
2157 }
2158
2159 // Miscellaneous operations:
2160
// Emit a brk instruction (software breakpoint) with an optional immediate
// tag visible to the debugger.
2161 void breakpoint(uint16_t imm = 0)
2162 {
2163 m_assembler.brk(imm);
2164 }
2165
2166 void nop()
2167 {
2168 m_assembler.nop();
2169 }
2170
2171
2172 // Misc helper functions.
2173
2174 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
// Delegates to the assembler's condition-code inverter (flips the low bit
// of the AArch64 condition encoding).
2175 static RelationalCondition invert(RelationalCondition cond)
2176 {
2177 return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
2178 }
2179
// Read back the target of a previously linked call at the given location.
2180 static FunctionPtr readCallTarget(CodeLocationCall call)
2181 {
2182 return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
2183 }
2184
// Overwrite the instruction(s) at instructionStart with a jump to
// destination (used when invalidating previously emitted code).
2185 static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
2186 {
2187 ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
2188 }
2189
2190 static ptrdiff_t maxJumpReplacementSize()
2191 {
2192 return ARM64Assembler::maxJumpReplacementSize();
2193 }
2194
// This port only supports jump replacement of the register form of
// branchPtrWithPatch; the address form is unsupported (see the
// UNREACHABLE_FOR_PLATFORM stubs below).
2195 static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
2196
// The patchable move is the first instruction of the branch sequence, so
// the branch starts exactly at the data label.
2197 static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
2198 {
2199 return label.labelAtOffset(0);
2200 }
2201
2202 static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
2203 {
2204 UNREACHABLE_FOR_PLATFORM();
2205 return CodeLocationLabel();
2206 }
2207
// Undo a replaceWithJump: re-emit the original patchable pointer move with
// the given initial value.
2208 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
2209 {
2210 reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
2211 }
2212
2213 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
2214 {
2215 UNREACHABLE_FOR_PLATFORM();
2216 }
2217
2218protected:
// Emit a conditional branch (b.cond) followed by a nop. The nop reserves
// space so the linker can rewrite the branch as a longer sequence if the
// target turns out to be out of b.cond range; the fixed-size jump type is
// used when the branch must stay patchable.
2219 ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
2220 {
2221 m_assembler.b_cond(cond);
2222 AssemblerLabel label = m_assembler.label();
2223 m_assembler.nop();
2224 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
2225 }
2226 ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
2227 ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
2228 ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
2229
// Emit cbz/cbnz plus a range-extension nop, as above. dataSize selects the
// 32- vs 64-bit register form.
2230 template <int dataSize>
2231 ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
2232 {
2233 if (cond == IsZero)
2234 m_assembler.cbz<dataSize>(reg);
2235 else
2236 m_assembler.cbnz<dataSize>(reg);
2237 AssemblerLabel label = m_assembler.label();
2238 m_assembler.nop();
2239 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
2240 }
2241
// Emit tbz/tbnz plus a range-extension nop. The mask after the ASSERT is
// belt-and-braces: it guarantees a valid bit number even in release builds.
2242 ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
2243 {
2244 ASSERT(bit < 64);
2245 bit &= 0x3f;
2246 if (cond == IsZero)
2247 m_assembler.tbz(reg, bit);
2248 else
2249 m_assembler.tbnz(reg, bit);
2250 AssemblerLabel label = m_assembler.label();
2251 m_assembler.nop();
2252 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
2253 }
2254
// The MacroAssembler condition enums are defined with the AArch64 condition
// encodings, so conversion is a plain cast.
2255 ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
2256 {
2257 return static_cast<ARM64Assembler::Condition>(cond);
2258 }
2259
2260 ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
2261 {
2262 return static_cast<ARM64Assembler::Condition>(cond);
2263 }
2264
2265 ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
2266 {
2267 return static_cast<ARM64Assembler::Condition>(cond);
2268 }
2269
2270private:
// Fetch a temp register for writing: invalidates its cached-value tracking
// since the caller is about to clobber it.
2271 ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
2272 ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
2273
2274 ALWAYS_INLINE bool isInIntRange(intptr_t value)
2275 {
2276 return value == ((value << 32) >> 32);
2277 }
2278
// Materialize an arbitrary 32- or 64-bit immediate into dest using the
// shortest available sequence:
//   1. movz/movn for 0 and ~0;
//   2. a single logical-immediate move (orr with a bitmask immediate) when
//      the value is encodable as one;
//   3. otherwise movz/movn for the first significant halfword followed by
//      movk for each remaining one. A vote over the halfwords decides
//      whether to build from zeros (movz) or from ones (movn) so the fewest
//      instructions are emitted.
2279 template<typename ImmediateType, typename rawType>
2280 void moveInternal(ImmediateType imm, RegisterID dest)
2281 {
2282 const int dataSize = sizeof(rawType)*8;
2283 const int numberHalfWords = dataSize/16;
2284 rawType value = bitwise_cast<rawType>(imm.m_value);
2285 uint16_t halfword[numberHalfWords];
2286
2287 // Handle 0 and ~0 here to simplify code below
2288 if (!value) {
2289 m_assembler.movz<dataSize>(dest, 0);
2290 return;
2291 }
2292 if (!~value) {
2293 m_assembler.movn<dataSize>(dest, 0);
2294 return;
2295 }
2296
2297 LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
2298
2299 if (logicalImm.isValid()) {
2300 m_assembler.movi<dataSize>(dest, logicalImm);
2301 return;
2302 }
2303
2304 // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
2305 int zeroOrNegateVote = 0;
2306 for (int i = 0; i < numberHalfWords; ++i) {
2307 halfword[i] = getHalfword(value, i);
2308 if (!halfword[i])
2309 zeroOrNegateVote++;
2310 else if (halfword[i] == 0xffff)
2311 zeroOrNegateVote--;
2312 }
2313
2314 bool needToClearRegister = true;
2315 if (zeroOrNegateVote >= 0) {
// Build from zeros: movz for the first non-zero halfword, movk for the rest.
2316 for (int i = 0; i < numberHalfWords; i++) {
2317 if (halfword[i]) {
2318 if (needToClearRegister) {
2319 m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
2320 needToClearRegister = false;
2321 } else
2322 m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
2323 }
2324 }
2325 } else {
// Build from ones: movn takes the complement of the halfword; later
// halfwords are patched in directly with movk.
2326 for (int i = 0; i < numberHalfWords; i++) {
2327 if (halfword[i] != 0xffff) {
2328 if (needToClearRegister) {
2329 m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
2330 needToClearRegister = false;
2331 } else
2332 m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
2333 }
2334 }
2335 }
2336 }
2337
// Primary templates for scaled-unsigned-offset (ldr/str) and
// unscaled-signed-offset (ldur/stur) accesses; the 8- and 16-bit cases are
// provided as explicit specializations after the class definition.
2338 template<int datasize>
2339 ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
2340 {
2341 m_assembler.ldr<datasize>(rt, rn, pimm);
2342 }
2343
2344 template<int datasize>
2345 ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
2346 {
2347 m_assembler.ldur<datasize>(rt, rn, simm);
2348 }
2349
2350 template<int datasize>
2351 ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
2352 {
2353 m_assembler.str<datasize>(rt, rn, pimm);
2354 }
2355
2356 template<int datasize>
2357 ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
2358 {
2359 m_assembler.stur<datasize>(rt, rn, simm);
2360 }
2361
// Fixed-width immediate loads: always emit the same number of instructions
// regardless of the value, so the site can be repatched in place.
2362 void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
2363 {
2364 int32_t value = imm.m_value;
2365 m_assembler.movz<32>(dest, getHalfword(value, 0));
2366 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2367 }
2368
// Pointer variant emits only three halfwords (bits 0-47).
// NOTE(review): assumes pointers fit in 48 bits on this platform -- the
// top halfword is never emitted; confirm against the repatching code in
// ARM64Assembler::setPointer.
2369 void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
2370 {
2371 intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
2372 m_assembler.movz<64>(dest, getHalfword(value, 0));
2373 m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
2374 m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
2375 }
2376
// Fixed-width sign-extending load of a 32-bit value: movz-based sequence
// for non-negative values, movn-based for negative ones.
2377 void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
2378 {
2379 if (value >= 0) {
2380 m_assembler.movz<32>(dest, getHalfword(value, 0));
2381 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2382 } else {
2383 m_assembler.movn<32>(dest, ~getHalfword(value, 0));
2384 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2385 }
2386 }
2387
// Sign-extend a 32-bit immediate to pointer width by routing it through the
// pointer move path (which also benefits from temp-register value caching).
2388 void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
2389 {
2390 move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
2391 }
2392
// Load from an absolute address, exploiting the cached contents of the
// memory temp register: if the register is known to hold a nearby address,
// use a delta-offset ldur/ldr; if only the low halfword differs, patch it
// with a single movk. Otherwise fall back to materializing the full address.
// When dest IS the memory temp register, its cached value must be
// invalidated since the load clobbers it.
2393 template<int datasize>
2394 ALWAYS_INLINE void load(const void* address, RegisterID dest)
2395 {
2396 intptr_t currentRegisterContents;
2397 if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2398 intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2399 intptr_t addressDelta = addressAsInt - currentRegisterContents;
2400
12899fa2
A
2401 if (dest == memoryTempRegister)
2402 m_cachedMemoryTempRegister.invalidate();
2403
93a37866
A
2404 if (isInIntRange(addressDelta)) {
2405 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2406 m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
2407 return;
2408 }
2409
2410 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2411 m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
2412 return;
2413 }
2414 }
2415
2416 if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2417 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2418 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2419 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2420 return;
2421 }
2422 }
2423
2424 move(TrustedImmPtr(address), memoryTempRegister);
12899fa2
A
2425 if (dest == memoryTempRegister)
2426 m_cachedMemoryTempRegister.invalidate();
2427 else
2428 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
93a37866
A
2429 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2430 }
2431
// Store to an absolute address; mirror image of load() above. No
// dest-clobber check is needed here because the stored register is only
// read, never written.
2432 template<int datasize>
2433 ALWAYS_INLINE void store(RegisterID src, const void* address)
2434 {
2435 intptr_t currentRegisterContents;
2436 if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2437 intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2438 intptr_t addressDelta = addressAsInt - currentRegisterContents;
2439
2440 if (isInIntRange(addressDelta)) {
2441 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2442 m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
2443 return;
2444 }
2445
2446 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2447 m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
2448 return;
2449 }
2450 }
2451
2452 if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2453 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2454 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2455 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
2456 return;
2457 }
2458 }
2459
2460 move(TrustedImmPtr(address), memoryTempRegister);
2461 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2462 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
2463 }
2464
// Try to reach 'immediate' cheaply given what the cached temp register is
// already known to hold: no-op if the value matches; a single
// logical-immediate move if encodable; or movk patches of just the low one
// or two halfwords when only bits 0-31 differ. Returns false if no cheap
// route exists (caller then emits a full move).
2465 template <int dataSize>
2466 ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
2467 {
2468 intptr_t currentRegisterContents;
2469 if (dest.value(currentRegisterContents)) {
2470 if (currentRegisterContents == immediate)
2471 return true;
2472
2473 LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
2474
2475 if (logicalImm.isValid()) {
2476 m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
2477 dest.setValue(immediate);
2478 return true;
2479 }
2480
2481 if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
2482 if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
2483 m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
2484
2485 if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
2486 m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
2487
2488 dest.setValue(immediate);
2489 return true;
2490 }
2491 }
2492
2493 return false;
2494 }
2495
// Move an immediate into a cached temp register, preferring the cheap
// cache-aware path above; on fallback, emit a full move and record the new
// cached value.
2496 void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
2497 {
2498 if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
2499 return;
2500
2501 moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
2502 dest.setValue(imm.m_value);
2503 }
2504
2505 void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
2506 {
2507 if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
2508 return;
2509
2510 moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
2511 dest.setValue(imm.asIntptr());
2512 }
2513
2514 void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
2515 {
2516 if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
2517 return;
2518
2519 moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
2520 dest.setValue(imm.m_value);
2521 }
2522
// Try to emit a single load/store at [rn + offset], returning false if the
// offset fits neither the unscaled signed-9-bit (ldur/stur) nor the scaled
// unsigned-12-bit (ldr/str) encoding; callers then materialize the offset
// in a register. Separate overloads cover general and FP registers.
2523 template<int datasize>
2524 ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2525 {
2526 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2527 loadUnscaledImmediate<datasize>(rt, rn, offset);
2528 return true;
2529 }
2530 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2531 loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2532 return true;
2533 }
2534 return false;
2535 }
2536
2537 template<int datasize>
2538 ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2539 {
2540 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2541 m_assembler.ldur<datasize>(rt, rn, offset);
2542 return true;
2543 }
2544 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2545 m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
2546 return true;
2547 }
2548 return false;
2549 }
2550
2551 template<int datasize>
2552 ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2553 {
2554 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2555 storeUnscaledImmediate<datasize>(rt, rn, offset);
2556 return true;
2557 }
2558 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2559 storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2560 return true;
2561 }
2562 return false;
2563 }
2564
2565 template<int datasize>
2566 ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2567 {
2568 if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2569 m_assembler.stur<datasize>(rt, rn, offset);
2570 return true;
2571 }
2572 if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2573 m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
2574 return true;
2575 }
2576 return false;
2577 }
2578
2579 friend class LinkBuffer;
2580 friend class RepatchBuffer;
2581
// Link a Call record: near calls patch the bl instruction directly; far
// calls patch the pointer-load sequence that sits
// REPATCH_OFFSET_CALL_TO_POINTER bytes before the call label (see call()).
2582 static void linkCall(void* code, Call call, FunctionPtr function)
2583 {
2584 if (call.isFlagSet(Call::Near))
2585 ARM64Assembler::linkCall(code, call.m_label, function.value());
2586 else
2587 ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
2588 }
2589
// Repatch an already-linked (far) call by rewriting its pointer load.
2590 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
2591 {
2592 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2593 }
2594
2595 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
2596 {
2597 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2598 }
2599
2600 CachedTempRegister m_dataMemoryTempRegister;
2601 CachedTempRegister m_cachedMemoryTempRegister;
2602 bool m_makeJumpPatchable;
2603};
2604
2605// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
// The 8- and 16-bit sizes have dedicated mnemonics (ldrb/ldrh, ldurb/ldurh,
// strb/strh, sturb/sturh) rather than a size-templated form, hence these
// explicit specializations.
2606template<>
2607ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
2608{
2609 m_assembler.ldrb(rt, rn, pimm);
2610}
2611
2612template<>
2613ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
2614{
2615 m_assembler.ldrh(rt, rn, pimm);
2616}
2617
2618template<>
2619ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
2620{
2621 m_assembler.ldurb(rt, rn, simm);
2622}
2623
2624template<>
2625ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
2626{
2627 m_assembler.ldurh(rt, rn, simm);
2628}
2629
2630template<>
2631ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
2632{
2633 m_assembler.strb(rt, rn, pimm);
2634}
2635
2636template<>
2637ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
2638{
2639 m_assembler.strh(rt, rn, pimm);
2640}
2641
2642template<>
2643ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
2644{
2645 m_assembler.sturb(rt, rn, simm);
2646}
2647
2648template<>
2649ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
2650{
2651 m_assembler.sturh(rt, rn, simm);
2652}
2653
2654} // namespace JSC
2655
2656#endif // ENABLE(ASSEMBLER)
2657
2658#endif // MacroAssemblerARM64_h