/*
 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssemblerARM64_h
#define MacroAssemblerARM64_h

#if ENABLE(ASSEMBLER)

#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/MathExtras.h>

namespace JSC {
class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    static const intptr_t maskHalfWord0 = 0xffffl;
    static const intptr_t maskHalfWord1 = 0xffff0000l;
    static const intptr_t maskUpperWord = 0xffffffff00000000l;

    // 4 instructions - 3 to load the function pointer, + blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
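    // As a sketch (illustrative, not emitted verbatim here), the patchable
    // call site is expected to look like:
    //     movz x16, #bits[0:15]           ; load the function pointer...
    //     movk x16, #bits[16:31], lsl 16
    //     movk x16, #bits[32:47], lsl 32
    //     blr  x16
    // i.e. 4 instructions of 4 bytes each, so the pointer load begins
    // 16 bytes before the return address - hence the -16 above.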
public:
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }

    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); }
    static const Scale ScalePtr = TimesEight;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
        return !(value & ~0x3ff8);
    }
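    // In other words (illustrative values): offsets such as 0x0, 0x8 and
    // 0x3ff8 are compact, while 0x4000 (too large) and 0x4 (not 8-byte
    // aligned) are not.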
    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! Check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! Check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
    };

    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID framePointerRegister = ARM64Registers::fp;
    static const RegisterID linkRegister = ARM64Registers::lr;

    // FIXME: Get reasonable implementations for these.
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
    // Integer operations:

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }
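    // For example (hypothetical operands): an immediate of 4095 fits the
    // 12-bit encoding and becomes a single add, -4095 becomes a single sub,
    // while 70000 has to be materialized in the data temp register first.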
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        if (src == ARM64Registers::sp)
            m_assembler.add<64>(dest, src, dest);
        else
            m_assembler.add<64>(dest, dest, src);
    }

    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }
    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }
    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }
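    // Note on LogicalImmediate: ARM64 logical instructions can only encode
    // "bitmask immediate" patterns (a rotated run of contiguous ones,
    // possibly replicated). For instance 0xff and 0xffff0000 encode
    // directly, whereas an arbitrary value such as 0x12345678 does not and
    // falls back to the temp-register path above.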
    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void and64(TrustedImmPtr imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, shiftAmount);
    }

    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
    }

    void lshift64(RegisterID shiftAmount, RegisterID dest)
    {
        lshift64(dest, shiftAmount, dest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        lshift64(dest, imm, dest);
    }
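    // Immediate shift amounts are masked to the operand width (& 0x1f for
    // 32-bit, & 0x3f for 64-bit), mirroring what the hardware does for
    // register-specified shifts; e.g. a 32-bit shift by 33 shifts by 1.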
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<64>(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }
    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, shiftAmount);
    }

    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
    }

    void rshift64(RegisterID shiftAmount, RegisterID dest)
    {
        rshift64(dest, shiftAmount, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        rshift64(dest, imm, dest);
    }
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }
    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<32>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

            if (logicalImm.isValid()) {
                m_assembler.eor<32>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<32>(dest, src, dataTempRegister);
        }
    }

    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }

    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

            if (logicalImm.isValid()) {
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }
    // Memory access operations:

    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
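    // The pattern above recurs throughout the loads and stores below: first
    // try to encode the offset directly in the load/store instruction
    // (tryLoadWithOffset/tryStoreWithOffset), and only if that fails
    // materialize the offset in the memory temp register (ip1) and use a
    // register-register addressing form.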
    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), memoryTempRegister);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }
    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
    void load16Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            m_cachedMemoryTempRegister.invalidate();
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
    }
    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }
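    // Storing a zero immediate reuses the zero register (zr) rather than
    // burning a temp; e.g. store64(TrustedImm64(0), addr) emits a single
    // "str xzr, [base, ...]" with no constant materialization.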
    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    void store8(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<8>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<8>(src, address.base, memoryTempRegister);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    void store8(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }
    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }
    // Convert 'src' to an integer, placing the result in 'dest'.
    // If the result is not representable as a 32-bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to a double and compare it with the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }
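    // Worked example: converting -0.0 yields the integer 0, and the
    // round-trip compare 0.0 == -0.0 succeeds under IEEE 754, so only the
    // extra Zero test above routes -0.0 to failureCases, letting the caller
    // keep it represented as a double.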
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump result = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }
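    // Background for the two special cases: fcmp sets the V flag when the
    // comparison is unordered, i.e. at least one operand is NaN. So VS
    // branches exactly on "unordered", and peeling that case off first lets
    // plain NE/EQ express the ordered half of the condition.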
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, then copy the low 32 bits to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check that the low 32 bits sign-extend to the full 64-bit value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }
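    // The "cmp with SXTW" trick: comparing the 64-bit result against its own
    // low 32 bits sign-extended sets Equal exactly when the value fits in
    // int32 range; e.g. 0x000000007fffffff passes, 0x0000000080000000 does
    // not.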
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 64>(dest, src);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        moveToCachedReg(address, m_cachedMemoryTempRegister);
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        moveToCachedReg(address, m_cachedMemoryTempRegister);
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // Result is undefined if the value is outside of the integer range.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }

    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }
    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. These operations are not supported on
    // ARM64. Peek and poke operations read or write values on the
    // stack, without moving the current stack position. Additionally,
    // there are popToRestore and pushToSave operations, which are
    // designed just for quick-and-dirty saving and restoring of
    // temporary values. These operations don't claim to have any
    // ABI compatibility.
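    // Usage sketch (a hypothetical JIT prologue/epilogue built on the pair
    // operations, which keep sp 16-byte aligned as the ARM64 ABI requires):
    //     jit.pushPair(framePointerRegister, linkRegister); // sp -= 16
    //     ...
    //     jit.popPair(framePointerRegister, linkRegister);  // sp += 16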
    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
    }

    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
    }

    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }

    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }

    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        RegisterID reg = dataTempRegister;
        pushPair(reg, reg);
        move(imm, reg);
        store64(reg, stackPointerRegister);
        load64(Address(stackPointerRegister, 8), reg);
    }
    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }

    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return 16; }
    // Register move operations:

    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }

    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }
    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1,
    // when treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
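    // For example (illustrative use from JIT code holding a `jit` instance):
    //     Jump slowPath = jit.branch32(GreaterThan, ARM64Registers::x0, TrustedImm32(5));
    //     ... fast path ...
    //     slowPath.link(&jit); // or link externally after relocation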
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<32>(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, left, memoryTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<64>(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        intptr_t immediate = right.m_value;
        if (isUInt12(immediate))
            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
        else if (isUInt12(-immediate))
            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, left, memoryTempRegister);
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, dataTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<32>(reg, mask);
        return Jump(makeBranch(cond));
    }

    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.tst<32>(reg, reg);
        else {
            bool testedWithImmediate = false;
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<32>(reg, logicalImm);
                    testedWithImmediate = true;
                }
            }
            if (!testedWithImmediate) {
                move(mask, getCachedDataTempRegisterIDAndInvalidate());
                m_assembler.tst<32>(reg, dataTempRegister);
            }
        }
    }
    Jump branch(ResultCondition cond)
    {
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<32>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<32>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
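    // The fast paths above map cleanly onto dedicated ARM64 instructions: a
    // mask of -1 with Zero/NonZero becomes cbz/cbnz on the register itself,
    // and a single-bit mask becomes tbz/tbnz on that bit - no flags and no
    // separate tst. Only irregular masks need the tst + conditional-branch
    // sequence.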
    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<64>(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<64>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<64>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }
1845 Jump
branchTest8(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1847 load8(address
, getCachedDataTempRegisterIDAndInvalidate());
1848 return branchTest32(cond
, dataTempRegister
, mask
);
1851 Jump
branchTest8(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
1853 load8(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
1854 return branchTest32(cond
, dataTempRegister
, mask
);
1857 Jump
branchTest8(ResultCondition cond
, ExtendedAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
1859 move(TrustedImmPtr(reinterpret_cast<void*>(address
.offset
)), getCachedDataTempRegisterIDAndInvalidate());
1860 m_assembler
.ldrb(dataTempRegister
, address
.base
, dataTempRegister
);
1861 return branchTest32(cond
, dataTempRegister
, mask
);
1864 Jump
branchTest8(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1866 load8(address
, getCachedDataTempRegisterIDAndInvalidate());
1867 return branchTest32(cond
, dataTempRegister
, mask
);
1870 Jump
branch32WithUnalignedHalfWords(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1872 return branch32(cond
, left
, right
);

    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, op1, dataTempRegister, dest);
    }
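
    // A sketch of the immediate selection above (operands are placeholders):
    // ARM64 add/sub take a 12-bit unsigned immediate, so a negative constant is
    // handled by flipping to the opposite operation rather than materializing it:
    //
    //   branchAdd32(Overflow, op1, TrustedImm32(16), dest)  -> adds wDest, wOp1, #16
    //   branchAdd32(Overflow, op1, TrustedImm32(-16), dest) -> subs wDest, wOp1, #16
    //
    // Only constants outside +-4095 fall back to a move into dataTempRegister.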

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, dest, dataTempRegister, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, dest, src, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd32(cond, dest, imm, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
            store32(dataTempRegister, address.m_ptr);
        }

        return Jump(makeBranch(cond));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd64(cond, op1, dataTempRegister, dest);
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, dest, src, dest);
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, dest, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        if (cond != Overflow) {
            m_assembler.mul<32>(dest, src1, src2);
            return branchTest32(cond, dest);
        }

        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
        m_assembler.smull(dest, src1, src2);
        // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
        m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
        // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
        m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
        // After a mul32 the top 32 bits of the register should be clear.
        zeroExtend32ToPtr(dest, dest);
        // Check that bits 31..63 of the original result were all equal.
        return branch32(NotEqual, memoryTempRegister, dataTempRegister);
    }
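
    // Sketch of the overflow check above, for a hypothetical 64-bit product P
    // produced by smull: P fits in 32 bits exactly when bits 63..32 of P are a
    // sign-extension of bit 31, i.e. when
    //
    //   (P >> 32) == ((int32_t)P >> 31)   // right side is 0 or -1: the splatted sign bit
    //
    // dataTempRegister receives the left-hand side and memoryTempRegister the
    // right-hand side, so a NotEqual compare branches exactly on signed overflow.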

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, dest, src, dest);
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchMul32(cond, dataTempRegister, src, dest);
    }

    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        // This is a signed multiply of two 64-bit values, producing a 64-bit result.
        m_assembler.mul<64>(dest, src1, src2);

        if (cond != Overflow)
            return branchTest64(cond, dest);

        // Compute bits 127..64 of the result into dataTempRegister.
        m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2);
        // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
        m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63);
        // Check that bits 63..127 of the original result were all equal.
        return branch64(NotEqual, memoryTempRegister, dataTempRegister);
    }
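
    // The 64-bit analogue of the check above: mul yields the low 64 bits and
    // smulh the high 64 bits of the 128-bit signed product. No overflow occurred
    // exactly when the high half equals the sign-splat of bit 63 of the low half,
    // which is what the asr<64>(..., dest, 63) / branch64(NotEqual, ...) pair tests.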

    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul64(cond, dest, src, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }

    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        m_assembler.neg<64, S>(srcDest, srcDest);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub32(cond, op1, dataTempRegister, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }

    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub64(cond, op1, dataTempRegister, dest);
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, dest, src, dest);
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, dest, imm, dest);
    }

    // Jumps, calls, returns

    ALWAYS_INLINE Call call()
    {
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        invalidateAllTempRegisters();
        m_assembler.blr(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }
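
    // Sketch of what call() emits (immediates shown are the unlinked placeholder).
    // The fixed-width pointer move is three 4-byte instructions covering a 48-bit
    // address, so callLabel sits 16 bytes past pointerLabel, matching
    // REPATCH_OFFSET_CALL_TO_POINTER == -16. dataTempRegister is ip0 (x16):
    //
    //   pointerLabel: movz x16, #0
    //                 movk x16, #0, lsl #16
    //                 movk x16, #0, lsl #32
    //                 blr  x16
    //   callLabel: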

    ALWAYS_INLINE Call call(RegisterID target)
    {
        invalidateAllTempRegisters();
        m_assembler.blr(target);
        return Call(m_assembler.label(), Call::None);
    }

    ALWAYS_INLINE Call call(Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return call(dataTempRegister);
    }

    ALWAYS_INLINE Jump jump()
    {
        AssemblerLabel label = m_assembler.label();
        m_assembler.b();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
    }

    void jump(RegisterID target)
    {
        m_assembler.br(target);
    }

    void jump(Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
    }

    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
        load64(Address(dataTempRegister), dataTempRegister);
        m_assembler.br(dataTempRegister);
    }

    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }

    ALWAYS_INLINE Call nearCall()
    {
        m_assembler.bl();
        return Call(m_assembler.label(), Call::LinkableNear);
    }

    ALWAYS_INLINE void ret()
    {
        m_assembler.ret();
    }

    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }

    // Comparison operations

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
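
    // The compare/test helpers in this section all follow the same two-instruction
    // sketch: a flag-setting cmp or tst, then cset writing 1 or 0 into dest. For
    // example (register names are placeholders), compare32(Equal, left, right, dest)
    // emits roughly:
    //
    //   cmp  wLeft, wRight
    //   cset wDest, eq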

    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(dataTempRegister, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        move(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<64>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        move(right, getCachedDataTempRegisterIDAndInvalidate());
        compare32(cond, memoryTempRegister, dataTempRegister, dest);
    }

    void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<32>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        test32(cond, dataTempRegister, mask, dest);
    }

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, getCachedDataTempRegisterIDAndInvalidate());
        test32(cond, dataTempRegister, mask, dest);
    }

    void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.tst<64>(op1, op2);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<64>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // Patchable operations

    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
    {
        DataLabel32 label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }

    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }

    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }

    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }

    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        dataLabel = DataLabel32(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, left, dataTempRegister);
    }

    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, TrustedImm32(right));
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableJump()
    {
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
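
    // All of the patchable* helpers above share one sketch: setting
    // m_makeJumpPatchable makes makeBranch()/jump() pick the *FixedSize jump
    // types, so branch compaction cannot shrink the emitted sequence and a
    // RepatchBuffer can later rewrite it in place:
    //
    //   m_makeJumpPatchable = true;  // force a full-width, repatchable branch
    //   Jump result = ...;           // emit the branch as usual
    //   m_makeJumpPatchable = false;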

    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
        store64(dataTempRegister, address);
        return label;
    }

    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }

    static void reemitInitialMoveWithPatch(void* address, void* value)
    {
        ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
    }

    // Miscellaneous operations:

    void breakpoint(uint16_t imm = 0)
    {
        m_assembler.brk(imm);
    }

    void nop()
    {
        m_assembler.nop();
    }

    void memoryFence()
    {
        m_assembler.dmbSY();
    }

    // Misc helper functions.

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
    }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARM64Assembler::maxJumpReplacementSize();
    }

    RegisterID scratchRegisterForBlinding()
    {
        // We *do not* have a scratch register for blinding.
        RELEASE_ASSERT_NOT_REACHED();
        return getCachedDataTempRegisterIDAndInvalidate();
    }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

private:

    ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
    {
        m_assembler.b_cond(cond);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
    }

    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }

    template <int dataSize>
    ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
    {
        if (cond == IsZero)
            m_assembler.cbz<dataSize>(reg);
        else
            m_assembler.cbnz<dataSize>(reg);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
    }

    ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
    {
        ASSERT(bit < 64);
        bit &= 0x3f;
        if (cond == IsZero)
            m_assembler.tbz(reg, bit);
        else
            m_assembler.tbnz(reg, bit);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
    }
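
    // A note on branch ranges (per the ARM64 ISA): b.cond and cbz/cbnz encode a
    // 19-bit offset (+-1MB) while tbz/tbnz encode only 14 bits (+-32KB). The nop
    // emitted after each branch above gives the link step room to rewrite a
    // short-range form when the target is too far away, roughly:
    //
    //   tbnz reg, #bit, skip   ; inverted condition
    //   b    target            ; +-128MB unconditional branch
    //   skip:
    //
    // The Jump*FixedSize variants keep this padding so the code stays patchable.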

    ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
    ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }

    ALWAYS_INLINE bool isInIntRange(intptr_t value)
    {
        return value == ((value << 32) >> 32);
    }

    template<typename ImmediateType, typename rawType>
    void moveInternal(ImmediateType imm, RegisterID dest)
    {
        const int dataSize = sizeof(rawType) * 8;
        const int numberHalfWords = dataSize / 16;
        rawType value = bitwise_cast<rawType>(imm.m_value);
        uint16_t halfword[numberHalfWords];

        // Handle 0 and ~0 here to simplify code below
        if (!value) {
            m_assembler.movz<dataSize>(dest, 0);
            return;
        }
        if (!~value) {
            m_assembler.movn<dataSize>(dest, 0);
            return;
        }

        LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));

        if (logicalImm.isValid()) {
            m_assembler.movi<dataSize>(dest, logicalImm);
            return;
        }

        // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
        int zeroOrNegateVote = 0;
        for (int i = 0; i < numberHalfWords; ++i) {
            halfword[i] = getHalfword(value, i);
            if (!halfword[i])
                zeroOrNegateVote++;
            else if (halfword[i] == 0xffff)
                zeroOrNegateVote--;
        }

        bool needToClearRegister = true;
        if (zeroOrNegateVote >= 0) {
            for (int i = 0; i < numberHalfWords; i++) {
                if (halfword[i]) {
                    if (needToClearRegister) {
                        m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
                        needToClearRegister = false;
                    } else
                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
                }
            }
        } else {
            for (int i = 0; i < numberHalfWords; i++) {
                if (halfword[i] != 0xffff) {
                    if (needToClearRegister) {
                        m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
                        needToClearRegister = false;
                    } else
                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
                }
            }
        }
    }
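
    // Worked example of the vote above, for the illustrative constant
    // 0xffffffff0000abcd: its halfwords are { 0xabcd, 0x0000, 0xffff, 0xffff },
    // so zeroOrNegateVote is -1 and the movn path wins:
    //
    //   movn x0, #0x5432           ; x0 = ~0x5432 = 0xffffffffffffabcd
    //   movk x0, #0x0000, lsl #16  ; x0 = 0xffffffff0000abcd
    //
    // i.e. two instructions where the movz-based sequence would need three.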

    template<int datasize>
    ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        m_assembler.ldr<datasize>(rt, rn, pimm);
    }

    template<int datasize>
    ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
    {
        m_assembler.ldur<datasize>(rt, rn, simm);
    }

    template<int datasize>
    ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        m_assembler.str<datasize>(rt, rn, pimm);
    }

    template<int datasize>
    ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
    {
        m_assembler.stur<datasize>(rt, rn, simm);
    }

    void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
    {
        int32_t value = imm.m_value;
        m_assembler.movz<32>(dest, getHalfword(value, 0));
        m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
    }

    void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
    {
        intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
        m_assembler.movz<64>(dest, getHalfword(value, 0));
        m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
        m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
    }

    void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
    {
        if (value >= 0) {
            m_assembler.movz<32>(dest, getHalfword(value, 0));
            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
        } else {
            m_assembler.movn<32>(dest, ~getHalfword(value, 0));
            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
        }
    }

    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
    {
        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
    }

    template<int datasize>
    ALWAYS_INLINE void load(const void* address, RegisterID dest)
    {
        intptr_t currentRegisterContents;
        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
            intptr_t addressDelta = addressAsInt - currentRegisterContents;

            if (dest == memoryTempRegister)
                m_cachedMemoryTempRegister.invalidate();

            if (isInIntRange(addressDelta)) {
                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
                    m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
                    return;
                }

                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
                    m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
                    return;
                }
            }

            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
                m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
                return;
            }
        }

        move(TrustedImmPtr(address), memoryTempRegister);
        if (dest == memoryTempRegister)
            m_cachedMemoryTempRegister.invalidate();
        else
            m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
        m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
    }
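
    // Sketch of the address caching above, assuming two successive absolute loads
    // from A and then A + 8: the first load materializes A into memoryTempRegister
    // and records it in m_cachedMemoryTempRegister; the second sees a delta of 8,
    // which fits an ldur/ldr offset, and emits a single load instead of
    // rebuilding the address. If only the low 16 bits of the address changed, a
    // lone movk patches the cached register; otherwise the full move is re-emitted.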

    template<int datasize>
    ALWAYS_INLINE void store(RegisterID src, const void* address)
    {
        intptr_t currentRegisterContents;
        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
            intptr_t addressDelta = addressAsInt - currentRegisterContents;

            if (isInIntRange(addressDelta)) {
                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
                    m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
                    return;
                }

                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
                    m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
                    return;
                }
            }

            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
                m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
                return;
            }
        }

        move(TrustedImmPtr(address), memoryTempRegister);
        m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
        m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
    }

    template <int dataSize>
    ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
    {
        intptr_t currentRegisterContents;
        if (dest.value(currentRegisterContents)) {
            if (currentRegisterContents == immediate)
                return true;

            LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));

            if (logicalImm.isValid()) {
                m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
                dest.setValue(immediate);
                return true;
            }

            if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
                if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);

                if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);

                dest.setValue(immediate);
                return true;
            }
        }

        return false;
    }
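
    // Example of the halfword patching above (values are illustrative): with
    // 0x0000123400005678 cached and 0x000012340000abcd requested, the upper 48
    // bits already match, so a single "movk reg, #0xabcd" replaces the full
    // movz/movk sequence that moveInternal() would otherwise emit.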

    void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
            return;

        moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.m_value);
    }

    void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
            return;

        moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.asIntptr());
    }

    void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
            return;

        moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.m_value);
    }

    template<int datasize>
    ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            loadUnscaledImmediate<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    template<int datasize>
    ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            m_assembler.ldur<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    template<int datasize>
    ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            storeUnscaledImmediate<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    template<int datasize>
    ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            m_assembler.stur<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }
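
    // The two encodings tried above, per the ARM64 ISA: ldur/stur take a signed
    // 9-bit byte offset (-256..255), while ldr/str take an unsigned 12-bit offset
    // scaled by the access size, so for datasize == 64 anything up to
    // 4095 * 8 = 32760 bytes still encodes directly. Only offsets outside both
    // ranges make the callers compute the address into a temporary register.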

    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (call.isFlagSet(Call::Near))
            ARM64Assembler::linkCall(code, call.m_label, function.value());
        else
            ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
    }
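
    // Sketch of the two linking paths above: a near call was emitted as a single
    // bl, which linkCall() retargets directly; a far call was emitted by call()
    // as movz/movk/movk + blr, so linkPointer() instead rewrites the three
    // pointer-load instructions REPATCH_OFFSET_CALL_TO_POINTER bytes before the
    // call's return label.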

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }

    CachedTempRegister m_dataMemoryTempRegister;
    CachedTempRegister m_cachedMemoryTempRegister;
    bool m_makeJumpPatchable;
};

// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrb(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrh(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurb(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurh(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strb(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strh(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturb(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturh(rt, rn, simm);
}

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerARM64_h