2 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef MacroAssemblerARM64_h
27 #define MacroAssemblerARM64_h
31 #include "ARM64Assembler.h"
32 #include "AbstractMacroAssembler.h"
33 #include <wtf/MathExtras.h>
37 class MacroAssemblerARM64
: public AbstractMacroAssembler
<ARM64Assembler
, MacroAssemblerARM64
> {
38 static const RegisterID dataTempRegister
= ARM64Registers::ip0
;
39 static const RegisterID memoryTempRegister
= ARM64Registers::ip1
;
40 static const ARM64Registers::FPRegisterID fpTempRegister
= ARM64Registers::q31
;
41 static const ARM64Assembler::SetFlags S
= ARM64Assembler::S
;
42 static const intptr_t maskHalfWord0
= 0xffffl
;
43 static const intptr_t maskHalfWord1
= 0xffff0000l
;
44 static const intptr_t maskUpperWord
= 0xffffffff00000000l
;
46 // 4 instructions - 3 to load the function pointer, + blr.
47 static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER
= -16;
51 : m_dataMemoryTempRegister(this, dataTempRegister
)
52 , m_cachedMemoryTempRegister(this, memoryTempRegister
)
53 , m_makeJumpPatchable(false)
57 typedef ARM64Assembler::LinkRecord LinkRecord
;
58 typedef ARM64Assembler::JumpType JumpType
;
59 typedef ARM64Assembler::JumpLinkType JumpLinkType
;
60 typedef ARM64Assembler::Condition Condition
;
62 static const ARM64Assembler::Condition DefaultCondition
= ARM64Assembler::ConditionInvalid
;
63 static const ARM64Assembler::JumpType DefaultJump
= ARM64Assembler::JumpNoConditionFixedSize
;
65 Vector
<LinkRecord
, 0, UnsafeVectorOverflow
>& jumpsToLink() { return m_assembler
.jumpsToLink(); }
66 void* unlinkedCode() { return m_assembler
.unlinkedCode(); }
67 static bool canCompact(JumpType jumpType
) { return ARM64Assembler::canCompact(jumpType
); }
68 static JumpLinkType
computeJumpType(JumpType jumpType
, const uint8_t* from
, const uint8_t* to
) { return ARM64Assembler::computeJumpType(jumpType
, from
, to
); }
69 static JumpLinkType
computeJumpType(LinkRecord
& record
, const uint8_t* from
, const uint8_t* to
) { return ARM64Assembler::computeJumpType(record
, from
, to
); }
70 static int jumpSizeDelta(JumpType jumpType
, JumpLinkType jumpLinkType
) { return ARM64Assembler::jumpSizeDelta(jumpType
, jumpLinkType
); }
71 static void link(LinkRecord
& record
, uint8_t* from
, uint8_t* to
) { return ARM64Assembler::link(record
, from
, to
); }
73 static const Scale ScalePtr
= TimesEight
;
75 static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value
)
77 // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
78 return !(value
& ~0x3ff8);
81 enum RelationalCondition
{
82 Equal
= ARM64Assembler::ConditionEQ
,
83 NotEqual
= ARM64Assembler::ConditionNE
,
84 Above
= ARM64Assembler::ConditionHI
,
85 AboveOrEqual
= ARM64Assembler::ConditionHS
,
86 Below
= ARM64Assembler::ConditionLO
,
87 BelowOrEqual
= ARM64Assembler::ConditionLS
,
88 GreaterThan
= ARM64Assembler::ConditionGT
,
89 GreaterThanOrEqual
= ARM64Assembler::ConditionGE
,
90 LessThan
= ARM64Assembler::ConditionLT
,
91 LessThanOrEqual
= ARM64Assembler::ConditionLE
94 enum ResultCondition
{
95 Overflow
= ARM64Assembler::ConditionVS
,
96 Signed
= ARM64Assembler::ConditionMI
,
97 PositiveOrZero
= ARM64Assembler::ConditionPL
,
98 Zero
= ARM64Assembler::ConditionEQ
,
99 NonZero
= ARM64Assembler::ConditionNE
103 IsZero
= ARM64Assembler::ConditionEQ
,
104 IsNonZero
= ARM64Assembler::ConditionNE
107 enum DoubleCondition
{
108 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
109 DoubleEqual
= ARM64Assembler::ConditionEQ
,
110 DoubleNotEqual
= ARM64Assembler::ConditionVC
, // Not the right flag! check for this & handle differently.
111 DoubleGreaterThan
= ARM64Assembler::ConditionGT
,
112 DoubleGreaterThanOrEqual
= ARM64Assembler::ConditionGE
,
113 DoubleLessThan
= ARM64Assembler::ConditionLO
,
114 DoubleLessThanOrEqual
= ARM64Assembler::ConditionLS
,
115 // If either operand is NaN, these conditions always evaluate to true.
116 DoubleEqualOrUnordered
= ARM64Assembler::ConditionVS
, // Not the right flag! check for this & handle differently.
117 DoubleNotEqualOrUnordered
= ARM64Assembler::ConditionNE
,
118 DoubleGreaterThanOrUnordered
= ARM64Assembler::ConditionHI
,
119 DoubleGreaterThanOrEqualOrUnordered
= ARM64Assembler::ConditionHS
,
120 DoubleLessThanOrUnordered
= ARM64Assembler::ConditionLT
,
121 DoubleLessThanOrEqualOrUnordered
= ARM64Assembler::ConditionLE
,
124 static const RegisterID stackPointerRegister
= ARM64Registers::sp
;
125 static const RegisterID framePointerRegister
= ARM64Registers::fp
;
126 static const RegisterID linkRegister
= ARM64Registers::lr
;
128 // FIXME: Get reasonable implementations for these
129 static bool shouldBlindForSpecificArch(uint32_t value
) { return value
>= 0x00ffffff; }
130 static bool shouldBlindForSpecificArch(uint64_t value
) { return value
>= 0x00ffffff; }
132 // Integer operations:
134 void add32(RegisterID src
, RegisterID dest
)
136 m_assembler
.add
<32>(dest
, dest
, src
);
139 void add32(TrustedImm32 imm
, RegisterID dest
)
141 add32(imm
, dest
, dest
);
144 void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
146 if (isUInt12(imm
.m_value
))
147 m_assembler
.add
<32>(dest
, src
, UInt12(imm
.m_value
));
148 else if (isUInt12(-imm
.m_value
))
149 m_assembler
.sub
<32>(dest
, src
, UInt12(-imm
.m_value
));
151 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
152 m_assembler
.add
<32>(dest
, src
, dataTempRegister
);
156 void add32(TrustedImm32 imm
, Address address
)
158 load32(address
, getCachedDataTempRegisterIDAndInvalidate());
160 if (isUInt12(imm
.m_value
))
161 m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
));
162 else if (isUInt12(-imm
.m_value
))
163 m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
));
165 move(imm
, getCachedMemoryTempRegisterIDAndInvalidate());
166 m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, memoryTempRegister
);
169 store32(dataTempRegister
, address
);
172 void add32(TrustedImm32 imm
, AbsoluteAddress address
)
174 load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
176 if (isUInt12(imm
.m_value
)) {
177 m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
));
178 store32(dataTempRegister
, address
.m_ptr
);
182 if (isUInt12(-imm
.m_value
)) {
183 m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
));
184 store32(dataTempRegister
, address
.m_ptr
);
188 move(imm
, getCachedMemoryTempRegisterIDAndInvalidate());
189 m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, memoryTempRegister
);
190 store32(dataTempRegister
, address
.m_ptr
);
193 void add32(Address src
, RegisterID dest
)
195 load32(src
, getCachedDataTempRegisterIDAndInvalidate());
196 add32(dataTempRegister
, dest
);
199 void add64(RegisterID src
, RegisterID dest
)
201 if (src
== ARM64Registers::sp
)
202 m_assembler
.add
<64>(dest
, src
, dest
);
204 m_assembler
.add
<64>(dest
, dest
, src
);
207 void add64(TrustedImm32 imm
, RegisterID dest
)
209 if (isUInt12(imm
.m_value
)) {
210 m_assembler
.add
<64>(dest
, dest
, UInt12(imm
.m_value
));
213 if (isUInt12(-imm
.m_value
)) {
214 m_assembler
.sub
<64>(dest
, dest
, UInt12(-imm
.m_value
));
218 signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate());
219 m_assembler
.add
<64>(dest
, dest
, dataTempRegister
);
222 void add64(TrustedImm64 imm
, RegisterID dest
)
224 intptr_t immediate
= imm
.m_value
;
226 if (isUInt12(immediate
)) {
227 m_assembler
.add
<64>(dest
, dest
, UInt12(static_cast<int32_t>(immediate
)));
230 if (isUInt12(-immediate
)) {
231 m_assembler
.sub
<64>(dest
, dest
, UInt12(static_cast<int32_t>(-immediate
)));
235 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
236 m_assembler
.add
<64>(dest
, dest
, dataTempRegister
);
239 void add64(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
241 if (isUInt12(imm
.m_value
)) {
242 m_assembler
.add
<64>(dest
, src
, UInt12(imm
.m_value
));
245 if (isUInt12(-imm
.m_value
)) {
246 m_assembler
.sub
<64>(dest
, src
, UInt12(-imm
.m_value
));
250 signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate());
251 m_assembler
.add
<64>(dest
, src
, dataTempRegister
);
254 void add64(TrustedImm32 imm
, Address address
)
256 load64(address
, getCachedDataTempRegisterIDAndInvalidate());
258 if (isUInt12(imm
.m_value
))
259 m_assembler
.add
<64>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
));
260 else if (isUInt12(-imm
.m_value
))
261 m_assembler
.sub
<64>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
));
263 signExtend32ToPtr(imm
, getCachedMemoryTempRegisterIDAndInvalidate());
264 m_assembler
.add
<64>(dataTempRegister
, dataTempRegister
, memoryTempRegister
);
267 store64(dataTempRegister
, address
);
270 void add64(TrustedImm32 imm
, AbsoluteAddress address
)
272 load64(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
274 if (isUInt12(imm
.m_value
)) {
275 m_assembler
.add
<64>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
));
276 store64(dataTempRegister
, address
.m_ptr
);
280 if (isUInt12(-imm
.m_value
)) {
281 m_assembler
.sub
<64>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
));
282 store64(dataTempRegister
, address
.m_ptr
);
286 signExtend32ToPtr(imm
, getCachedMemoryTempRegisterIDAndInvalidate());
287 m_assembler
.add
<64>(dataTempRegister
, dataTempRegister
, memoryTempRegister
);
288 store64(dataTempRegister
, address
.m_ptr
);
291 void addPtrNoFlags(TrustedImm32 imm
, RegisterID srcDest
)
296 void add64(Address src
, RegisterID dest
)
298 load64(src
, getCachedDataTempRegisterIDAndInvalidate());
299 m_assembler
.add
<64>(dest
, dest
, dataTempRegister
);
302 void add64(AbsoluteAddress src
, RegisterID dest
)
304 load64(src
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
305 m_assembler
.add
<64>(dest
, dest
, dataTempRegister
);
308 void and32(RegisterID src
, RegisterID dest
)
310 and32(dest
, src
, dest
);
313 void and32(RegisterID op1
, RegisterID op2
, RegisterID dest
)
315 m_assembler
.and_
<32>(dest
, op1
, op2
);
318 void and32(TrustedImm32 imm
, RegisterID dest
)
320 and32(imm
, dest
, dest
);
323 void and32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
325 LogicalImmediate logicalImm
= LogicalImmediate::create32(imm
.m_value
);
327 if (logicalImm
.isValid()) {
328 m_assembler
.and_
<32>(dest
, src
, logicalImm
);
332 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
333 m_assembler
.and_
<32>(dest
, src
, dataTempRegister
);
336 void and32(Address src
, RegisterID dest
)
338 load32(src
, dataTempRegister
);
339 and32(dataTempRegister
, dest
);
342 void and64(RegisterID src
, RegisterID dest
)
344 m_assembler
.and_
<64>(dest
, dest
, src
);
347 void and64(TrustedImm32 imm
, RegisterID dest
)
349 LogicalImmediate logicalImm
= LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm
.m_value
)));
351 if (logicalImm
.isValid()) {
352 m_assembler
.and_
<64>(dest
, dest
, logicalImm
);
356 signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate());
357 m_assembler
.and_
<64>(dest
, dest
, dataTempRegister
);
360 void and64(TrustedImmPtr imm
, RegisterID dest
)
362 LogicalImmediate logicalImm
= LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm
.m_value
));
364 if (logicalImm
.isValid()) {
365 m_assembler
.and_
<64>(dest
, dest
, logicalImm
);
369 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
370 m_assembler
.and_
<64>(dest
, dest
, dataTempRegister
);
373 void countLeadingZeros32(RegisterID src
, RegisterID dest
)
375 m_assembler
.clz
<32>(dest
, src
);
378 void lshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
380 m_assembler
.lsl
<32>(dest
, src
, shiftAmount
);
383 void lshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
385 m_assembler
.lsl
<32>(dest
, src
, imm
.m_value
& 0x1f);
388 void lshift32(RegisterID shiftAmount
, RegisterID dest
)
390 lshift32(dest
, shiftAmount
, dest
);
393 void lshift32(TrustedImm32 imm
, RegisterID dest
)
395 lshift32(dest
, imm
, dest
);
398 void lshift64(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
400 m_assembler
.lsl
<64>(dest
, src
, shiftAmount
);
403 void lshift64(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
405 m_assembler
.lsl
<64>(dest
, src
, imm
.m_value
& 0x3f);
408 void lshift64(RegisterID shiftAmount
, RegisterID dest
)
410 lshift64(dest
, shiftAmount
, dest
);
413 void lshift64(TrustedImm32 imm
, RegisterID dest
)
415 lshift64(dest
, imm
, dest
);
418 void mul32(RegisterID src
, RegisterID dest
)
420 m_assembler
.mul
<32>(dest
, dest
, src
);
423 void mul64(RegisterID src
, RegisterID dest
)
425 m_assembler
.mul
<64>(dest
, dest
, src
);
428 void mul32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
430 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
431 m_assembler
.mul
<32>(dest
, src
, dataTempRegister
);
434 void neg32(RegisterID dest
)
436 m_assembler
.neg
<32>(dest
, dest
);
439 void neg64(RegisterID dest
)
441 m_assembler
.neg
<64>(dest
, dest
);
444 void or32(RegisterID src
, RegisterID dest
)
446 or32(dest
, src
, dest
);
449 void or32(RegisterID op1
, RegisterID op2
, RegisterID dest
)
451 m_assembler
.orr
<32>(dest
, op1
, op2
);
454 void or32(TrustedImm32 imm
, RegisterID dest
)
456 or32(imm
, dest
, dest
);
459 void or32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
461 LogicalImmediate logicalImm
= LogicalImmediate::create32(imm
.m_value
);
463 if (logicalImm
.isValid()) {
464 m_assembler
.orr
<32>(dest
, src
, logicalImm
);
468 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
469 m_assembler
.orr
<32>(dest
, src
, dataTempRegister
);
472 void or32(RegisterID src
, AbsoluteAddress address
)
474 load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
475 m_assembler
.orr
<32>(dataTempRegister
, dataTempRegister
, src
);
476 store32(dataTempRegister
, address
.m_ptr
);
479 void or32(TrustedImm32 imm
, Address address
)
481 load32(address
, getCachedDataTempRegisterIDAndInvalidate());
482 or32(imm
, dataTempRegister
, dataTempRegister
);
483 store32(dataTempRegister
, address
);
486 void or64(RegisterID src
, RegisterID dest
)
488 or64(dest
, src
, dest
);
491 void or64(RegisterID op1
, RegisterID op2
, RegisterID dest
)
493 m_assembler
.orr
<64>(dest
, op1
, op2
);
496 void or64(TrustedImm32 imm
, RegisterID dest
)
498 or64(imm
, dest
, dest
);
501 void or64(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
503 LogicalImmediate logicalImm
= LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm
.m_value
)));
505 if (logicalImm
.isValid()) {
506 m_assembler
.orr
<64>(dest
, src
, logicalImm
);
510 signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate());
511 m_assembler
.orr
<64>(dest
, src
, dataTempRegister
);
514 void or64(TrustedImm64 imm
, RegisterID dest
)
516 LogicalImmediate logicalImm
= LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm
.m_value
)));
518 if (logicalImm
.isValid()) {
519 m_assembler
.orr
<64>(dest
, dest
, logicalImm
);
523 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
524 m_assembler
.orr
<64>(dest
, dest
, dataTempRegister
);
527 void rotateRight64(TrustedImm32 imm
, RegisterID srcDst
)
529 m_assembler
.ror
<64>(srcDst
, srcDst
, imm
.m_value
& 63);
532 void rshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
534 m_assembler
.asr
<32>(dest
, src
, shiftAmount
);
537 void rshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
539 m_assembler
.asr
<32>(dest
, src
, imm
.m_value
& 0x1f);
542 void rshift32(RegisterID shiftAmount
, RegisterID dest
)
544 rshift32(dest
, shiftAmount
, dest
);
547 void rshift32(TrustedImm32 imm
, RegisterID dest
)
549 rshift32(dest
, imm
, dest
);
552 void rshift64(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
554 m_assembler
.asr
<64>(dest
, src
, shiftAmount
);
557 void rshift64(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
559 m_assembler
.asr
<64>(dest
, src
, imm
.m_value
& 0x3f);
562 void rshift64(RegisterID shiftAmount
, RegisterID dest
)
564 rshift64(dest
, shiftAmount
, dest
);
567 void rshift64(TrustedImm32 imm
, RegisterID dest
)
569 rshift64(dest
, imm
, dest
);
572 void sub32(RegisterID src
, RegisterID dest
)
574 m_assembler
.sub
<32>(dest
, dest
, src
);
577 void sub32(TrustedImm32 imm
, RegisterID dest
)
579 if (isUInt12(imm
.m_value
)) {
580 m_assembler
.sub
<32>(dest
, dest
, UInt12(imm
.m_value
));
583 if (isUInt12(-imm
.m_value
)) {
584 m_assembler
.add
<32>(dest
, dest
, UInt12(-imm
.m_value
));
588 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
589 m_assembler
.sub
<32>(dest
, dest
, dataTempRegister
);
592 void sub32(TrustedImm32 imm
, Address address
)
594 load32(address
, getCachedDataTempRegisterIDAndInvalidate());
596 if (isUInt12(imm
.m_value
))
597 m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
));
598 else if (isUInt12(-imm
.m_value
))
599 m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
));
601 move(imm
, getCachedMemoryTempRegisterIDAndInvalidate());
602 m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, memoryTempRegister
);
605 store32(dataTempRegister
, address
);
608 void sub32(TrustedImm32 imm
, AbsoluteAddress address
)
610 load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
612 if (isUInt12(imm
.m_value
)) {
613 m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
));
614 store32(dataTempRegister
, address
.m_ptr
);
618 if (isUInt12(-imm
.m_value
)) {
619 m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
));
620 store32(dataTempRegister
, address
.m_ptr
);
624 move(imm
, getCachedMemoryTempRegisterIDAndInvalidate());
625 m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, memoryTempRegister
);
626 store32(dataTempRegister
, address
.m_ptr
);
629 void sub32(Address src
, RegisterID dest
)
631 load32(src
, getCachedDataTempRegisterIDAndInvalidate());
632 sub32(dataTempRegister
, dest
);
635 void sub64(RegisterID src
, RegisterID dest
)
637 m_assembler
.sub
<64>(dest
, dest
, src
);
640 void sub64(TrustedImm32 imm
, RegisterID dest
)
642 if (isUInt12(imm
.m_value
)) {
643 m_assembler
.sub
<64>(dest
, dest
, UInt12(imm
.m_value
));
646 if (isUInt12(-imm
.m_value
)) {
647 m_assembler
.add
<64>(dest
, dest
, UInt12(-imm
.m_value
));
651 signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate());
652 m_assembler
.sub
<64>(dest
, dest
, dataTempRegister
);
655 void sub64(TrustedImm64 imm
, RegisterID dest
)
657 intptr_t immediate
= imm
.m_value
;
659 if (isUInt12(immediate
)) {
660 m_assembler
.sub
<64>(dest
, dest
, UInt12(static_cast<int32_t>(immediate
)));
663 if (isUInt12(-immediate
)) {
664 m_assembler
.add
<64>(dest
, dest
, UInt12(static_cast<int32_t>(-immediate
)));
668 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
669 m_assembler
.sub
<64>(dest
, dest
, dataTempRegister
);
672 void urshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
674 m_assembler
.lsr
<32>(dest
, src
, shiftAmount
);
677 void urshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
679 m_assembler
.lsr
<32>(dest
, src
, imm
.m_value
& 0x1f);
682 void urshift32(RegisterID shiftAmount
, RegisterID dest
)
684 urshift32(dest
, shiftAmount
, dest
);
687 void urshift32(TrustedImm32 imm
, RegisterID dest
)
689 urshift32(dest
, imm
, dest
);
692 void urshift64(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
694 m_assembler
.lsr
<64>(dest
, src
, shiftAmount
);
697 void urshift64(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
699 m_assembler
.lsr
<64>(dest
, src
, imm
.m_value
& 0x1f);
702 void urshift64(RegisterID shiftAmount
, RegisterID dest
)
704 urshift64(dest
, shiftAmount
, dest
);
707 void urshift64(TrustedImm32 imm
, RegisterID dest
)
709 urshift64(dest
, imm
, dest
);
712 void xor32(RegisterID src
, RegisterID dest
)
714 xor32(dest
, src
, dest
);
717 void xor32(RegisterID op1
, RegisterID op2
, RegisterID dest
)
719 m_assembler
.eor
<32>(dest
, op1
, op2
);
722 void xor32(TrustedImm32 imm
, RegisterID dest
)
724 xor32(imm
, dest
, dest
);
727 void xor32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
729 if (imm
.m_value
== -1)
730 m_assembler
.mvn
<32>(dest
, src
);
732 LogicalImmediate logicalImm
= LogicalImmediate::create32(imm
.m_value
);
734 if (logicalImm
.isValid()) {
735 m_assembler
.eor
<32>(dest
, src
, logicalImm
);
739 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
740 m_assembler
.eor
<32>(dest
, src
, dataTempRegister
);
744 void xor64(RegisterID src
, Address address
)
746 load64(address
, getCachedDataTempRegisterIDAndInvalidate());
747 m_assembler
.eor
<64>(dataTempRegister
, dataTempRegister
, src
);
748 store64(dataTempRegister
, address
);
751 void xor64(RegisterID src
, RegisterID dest
)
753 xor64(dest
, src
, dest
);
756 void xor64(RegisterID op1
, RegisterID op2
, RegisterID dest
)
758 m_assembler
.eor
<64>(dest
, op1
, op2
);
761 void xor64(TrustedImm32 imm
, RegisterID dest
)
763 xor64(imm
, dest
, dest
);
766 void xor64(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
768 if (imm
.m_value
== -1)
769 m_assembler
.mvn
<64>(dest
, src
);
771 LogicalImmediate logicalImm
= LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm
.m_value
)));
773 if (logicalImm
.isValid()) {
774 m_assembler
.eor
<64>(dest
, src
, logicalImm
);
778 signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate());
779 m_assembler
.eor
<64>(dest
, src
, dataTempRegister
);
784 // Memory access operations:
786 void load64(ImplicitAddress address
, RegisterID dest
)
788 if (tryLoadWithOffset
<64>(dest
, address
.base
, address
.offset
))
791 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
792 m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
);
795 void load64(BaseIndex address
, RegisterID dest
)
797 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 3)) {
798 m_assembler
.ldr
<64>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
802 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
803 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
804 m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
);
807 void load64(const void* address
, RegisterID dest
)
809 load
<64>(address
, dest
);
812 DataLabel32
load64WithAddressOffsetPatch(Address address
, RegisterID dest
)
814 DataLabel32
label(this);
815 signExtend32ToPtrWithFixedWidth(address
.offset
, getCachedMemoryTempRegisterIDAndInvalidate());
816 m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
, ARM64Assembler::SXTW
, 0);
820 DataLabelCompact
load64WithCompactAddressOffsetPatch(Address address
, RegisterID dest
)
822 ASSERT(isCompactPtrAlignedAddressOffset(address
.offset
));
823 DataLabelCompact
label(this);
824 m_assembler
.ldr
<64>(dest
, address
.base
, address
.offset
);
828 void abortWithReason(AbortReason reason
)
830 move(TrustedImm32(reason
), dataTempRegister
);
834 void abortWithReason(AbortReason reason
, intptr_t misc
)
836 move(TrustedImm64(misc
), memoryTempRegister
);
837 abortWithReason(reason
);
840 ConvertibleLoadLabel
convertibleLoadPtr(Address address
, RegisterID dest
)
842 ConvertibleLoadLabel
result(this);
843 ASSERT(!(address
.offset
& ~0xff8));
844 m_assembler
.ldr
<64>(dest
, address
.base
, address
.offset
);
848 void load32(ImplicitAddress address
, RegisterID dest
)
850 if (tryLoadWithOffset
<32>(dest
, address
.base
, address
.offset
))
853 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
854 m_assembler
.ldr
<32>(dest
, address
.base
, memoryTempRegister
);
857 void load32(BaseIndex address
, RegisterID dest
)
859 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 2)) {
860 m_assembler
.ldr
<32>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
864 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
865 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
866 m_assembler
.ldr
<32>(dest
, address
.base
, memoryTempRegister
);
869 void load32(const void* address
, RegisterID dest
)
871 load
<32>(address
, dest
);
874 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
876 DataLabel32
label(this);
877 signExtend32ToPtrWithFixedWidth(address
.offset
, getCachedMemoryTempRegisterIDAndInvalidate());
878 m_assembler
.ldr
<32>(dest
, address
.base
, memoryTempRegister
, ARM64Assembler::SXTW
, 0);
882 DataLabelCompact
load32WithCompactAddressOffsetPatch(Address address
, RegisterID dest
)
884 ASSERT(isCompactPtrAlignedAddressOffset(address
.offset
));
885 DataLabelCompact
label(this);
886 m_assembler
.ldr
<32>(dest
, address
.base
, address
.offset
);
890 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
892 load32(address
, dest
);
895 void load16(ImplicitAddress address
, RegisterID dest
)
897 if (tryLoadWithOffset
<16>(dest
, address
.base
, address
.offset
))
900 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
901 m_assembler
.ldrh(dest
, address
.base
, memoryTempRegister
);
904 void load16(BaseIndex address
, RegisterID dest
)
906 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 1)) {
907 m_assembler
.ldrh(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
911 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
912 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
913 m_assembler
.ldrh(dest
, address
.base
, memoryTempRegister
);
916 void load16Unaligned(BaseIndex address
, RegisterID dest
)
918 load16(address
, dest
);
921 void load16SignedExtendTo32(BaseIndex address
, RegisterID dest
)
923 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 1)) {
924 m_assembler
.ldrsh
<32>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
928 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
929 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
930 m_assembler
.ldrsh
<32>(dest
, address
.base
, memoryTempRegister
);
933 void load8(ImplicitAddress address
, RegisterID dest
)
935 if (tryLoadWithOffset
<8>(dest
, address
.base
, address
.offset
))
938 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
939 m_assembler
.ldrb(dest
, address
.base
, memoryTempRegister
);
942 void load8(BaseIndex address
, RegisterID dest
)
944 if (!address
.offset
&& !address
.scale
) {
945 m_assembler
.ldrb(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
949 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
950 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
951 m_assembler
.ldrb(dest
, address
.base
, memoryTempRegister
);
954 void load8(const void* address
, RegisterID dest
)
956 moveToCachedReg(TrustedImmPtr(address
), m_cachedMemoryTempRegister
);
957 m_assembler
.ldrb(dest
, memoryTempRegister
, ARM64Registers::zr
);
958 if (dest
== memoryTempRegister
)
959 m_cachedMemoryTempRegister
.invalidate();
962 void load8SignedExtendTo32(BaseIndex address
, RegisterID dest
)
964 if (!address
.offset
&& !address
.scale
) {
965 m_assembler
.ldrsb
<32>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
969 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
970 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
971 m_assembler
.ldrsb
<32>(dest
, address
.base
, memoryTempRegister
);
974 void store64(RegisterID src
, ImplicitAddress address
)
976 if (tryStoreWithOffset
<64>(src
, address
.base
, address
.offset
))
979 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
980 m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
);
983 void store64(RegisterID src
, BaseIndex address
)
985 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 3)) {
986 m_assembler
.str
<64>(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
990 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
991 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
992 m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
);
995 void store64(RegisterID src
, const void* address
)
997 store
<64>(src
, address
);
1000 void store64(TrustedImm64 imm
, ImplicitAddress address
)
1003 store64(ARM64Registers::zr
, address
);
1007 moveToCachedReg(imm
, m_dataMemoryTempRegister
);
1008 store64(dataTempRegister
, address
);
1011 void store64(TrustedImm64 imm
, BaseIndex address
)
1014 store64(ARM64Registers::zr
, address
);
1018 moveToCachedReg(imm
, m_dataMemoryTempRegister
);
1019 store64(dataTempRegister
, address
);
1022 DataLabel32
store64WithAddressOffsetPatch(RegisterID src
, Address address
)
1024 DataLabel32
label(this);
1025 signExtend32ToPtrWithFixedWidth(address
.offset
, getCachedMemoryTempRegisterIDAndInvalidate());
1026 m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
, ARM64Assembler::SXTW
, 0);
1030 void store32(RegisterID src
, ImplicitAddress address
)
1032 if (tryStoreWithOffset
<32>(src
, address
.base
, address
.offset
))
1035 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1036 m_assembler
.str
<32>(src
, address
.base
, memoryTempRegister
);
1039 void store32(RegisterID src
, BaseIndex address
)
1041 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 2)) {
1042 m_assembler
.str
<32>(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1046 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1047 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1048 m_assembler
.str
<32>(src
, address
.base
, memoryTempRegister
);
1051 void store32(RegisterID src
, const void* address
)
1053 store
<32>(src
, address
);
1056 void store32(TrustedImm32 imm
, ImplicitAddress address
)
1059 store32(ARM64Registers::zr
, address
);
1063 moveToCachedReg(imm
, m_dataMemoryTempRegister
);
1064 store32(dataTempRegister
, address
);
1067 void store32(TrustedImm32 imm
, BaseIndex address
)
1070 store32(ARM64Registers::zr
, address
);
1074 moveToCachedReg(imm
, m_dataMemoryTempRegister
);
1075 store32(dataTempRegister
, address
);
1078 void store32(TrustedImm32 imm
, const void* address
)
1081 store32(ARM64Registers::zr
, address
);
1085 moveToCachedReg(imm
, m_dataMemoryTempRegister
);
1086 store32(dataTempRegister
, address
);
1089 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
1091 DataLabel32
label(this);
1092 signExtend32ToPtrWithFixedWidth(address
.offset
, getCachedMemoryTempRegisterIDAndInvalidate());
1093 m_assembler
.str
<32>(src
, address
.base
, memoryTempRegister
, ARM64Assembler::SXTW
, 0);
1097 void store16(RegisterID src
, BaseIndex address
)
1099 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 1)) {
1100 m_assembler
.strh(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1104 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1105 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1106 m_assembler
.strh(src
, address
.base
, memoryTempRegister
);
1109 void store8(RegisterID src
, BaseIndex address
)
1111 if (!address
.offset
&& !address
.scale
) {
1112 m_assembler
.strb(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1116 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1117 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1118 m_assembler
.strb(src
, address
.base
, memoryTempRegister
);
1121 void store8(RegisterID src
, void* address
)
1123 move(TrustedImmPtr(address
), getCachedMemoryTempRegisterIDAndInvalidate());
1124 m_assembler
.strb(src
, memoryTempRegister
, 0);
1127 void store8(RegisterID src
, ImplicitAddress address
)
1129 if (tryStoreWithOffset
<8>(src
, address
.base
, address
.offset
))
1132 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1133 m_assembler
.str
<8>(src
, address
.base
, memoryTempRegister
);
1136 void store8(TrustedImm32 imm
, void* address
)
1139 store8(ARM64Registers::zr
, address
);
1143 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
1144 store8(dataTempRegister
, address
);
1147 void store8(TrustedImm32 imm
, ImplicitAddress address
)
1150 store8(ARM64Registers::zr
, address
);
1154 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
1155 store8(dataTempRegister
, address
);
1158 // Floating-point operations:
// Capability queries: the ARM64 ISA always provides hardware support
// for these floating-point operations.
static bool supportsFloatingPoint() { return true; }
static bool supportsFloatingPointTruncate() { return true; }
static bool supportsFloatingPointSqrt() { return true; }
static bool supportsFloatingPointAbs() { return true; }

// Selects which outcome of a truncating conversion the branch takes.
enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1167 void absDouble(FPRegisterID src
, FPRegisterID dest
)
1169 m_assembler
.fabs
<64>(dest
, src
);
1172 void addDouble(FPRegisterID src
, FPRegisterID dest
)
1174 addDouble(dest
, src
, dest
);
1177 void addDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1179 m_assembler
.fadd
<64>(dest
, op1
, op2
);
1182 void addDouble(Address src
, FPRegisterID dest
)
1184 loadDouble(src
, fpTempRegister
);
1185 addDouble(fpTempRegister
, dest
);
1188 void addDouble(AbsoluteAddress address
, FPRegisterID dest
)
1190 loadDouble(TrustedImmPtr(address
.m_ptr
), fpTempRegister
);
1191 addDouble(fpTempRegister
, dest
);
1194 void ceilDouble(FPRegisterID src
, FPRegisterID dest
)
1196 m_assembler
.frintp
<64>(dest
, src
);
1199 void floorDouble(FPRegisterID src
, FPRegisterID dest
)
1201 m_assembler
.frintm
<64>(dest
, src
);
1204 // Convert 'src' to an integer, and places the resulting 'dest'.
1205 // If the result is not representable as a 32 bit value, branch.
1206 // May also branch for some values that are representable in 32 bits
1207 // (specifically, in this case, 0).
1208 void branchConvertDoubleToInt32(FPRegisterID src
, RegisterID dest
, JumpList
& failureCases
, FPRegisterID
, bool negZeroCheck
= true)
1210 m_assembler
.fcvtns
<32, 64>(dest
, src
);
1212 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
1213 m_assembler
.scvtf
<64, 32>(fpTempRegister
, dest
);
1214 failureCases
.append(branchDouble(DoubleNotEqualOrUnordered
, src
, fpTempRegister
));
1216 // Test for negative zero.
1218 Jump valueIsNonZero
= branchTest32(NonZero
, dest
);
1219 RegisterID scratch
= getCachedMemoryTempRegisterIDAndInvalidate();
1220 m_assembler
.fmov
<64>(scratch
, src
);
1221 failureCases
.append(makeTestBitAndBranch(scratch
, 63, IsNonZero
));
1222 valueIsNonZero
.link(this);
1226 Jump
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
)
1228 m_assembler
.fcmp
<64>(left
, right
);
1230 if (cond
== DoubleNotEqual
) {
1231 // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
1232 Jump unordered
= makeBranch(ARM64Assembler::ConditionVS
);
1233 Jump result
= makeBranch(ARM64Assembler::ConditionNE
);
1234 unordered
.link(this);
1237 if (cond
== DoubleEqualOrUnordered
) {
1238 Jump unordered
= makeBranch(ARM64Assembler::ConditionVS
);
1239 Jump notEqual
= makeBranch(ARM64Assembler::ConditionNE
);
1240 unordered
.link(this);
1241 // We get here if either unordered or equal.
1242 Jump result
= jump();
1243 notEqual
.link(this);
1246 return makeBranch(cond
);
1249 Jump
branchDoubleNonZero(FPRegisterID reg
, FPRegisterID
)
1251 m_assembler
.fcmp_0
<64>(reg
);
1252 Jump unordered
= makeBranch(ARM64Assembler::ConditionVS
);
1253 Jump result
= makeBranch(ARM64Assembler::ConditionNE
);
1254 unordered
.link(this);
1258 Jump
branchDoubleZeroOrNaN(FPRegisterID reg
, FPRegisterID
)
1260 m_assembler
.fcmp_0
<64>(reg
);
1261 Jump unordered
= makeBranch(ARM64Assembler::ConditionVS
);
1262 Jump notEqual
= makeBranch(ARM64Assembler::ConditionNE
);
1263 unordered
.link(this);
1264 // We get here if either unordered or equal.
1265 Jump result
= jump();
1266 notEqual
.link(this);
1270 Jump
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
, BranchTruncateType branchType
= BranchIfTruncateFailed
)
1272 // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
1273 m_assembler
.fcvtzs
<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src
);
1274 zeroExtend32ToPtr(dataTempRegister
, dest
);
1275 // Check thlow 32-bits sign extend to be equal to the full value.
1276 m_assembler
.cmp
<64>(dataTempRegister
, dataTempRegister
, ARM64Assembler::SXTW
, 0);
1277 return Jump(makeBranch(branchType
== BranchIfTruncateSuccessful
? Equal
: NotEqual
));
1280 void convertDoubleToFloat(FPRegisterID src
, FPRegisterID dest
)
1282 m_assembler
.fcvt
<32, 64>(dest
, src
);
1285 void convertFloatToDouble(FPRegisterID src
, FPRegisterID dest
)
1287 m_assembler
.fcvt
<64, 32>(dest
, src
);
1290 void convertInt32ToDouble(TrustedImm32 imm
, FPRegisterID dest
)
1292 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
1293 convertInt32ToDouble(dataTempRegister
, dest
);
1296 void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
)
1298 m_assembler
.scvtf
<64, 32>(dest
, src
);
1301 void convertInt32ToDouble(Address address
, FPRegisterID dest
)
1303 load32(address
, getCachedDataTempRegisterIDAndInvalidate());
1304 convertInt32ToDouble(dataTempRegister
, dest
);
1307 void convertInt32ToDouble(AbsoluteAddress address
, FPRegisterID dest
)
1309 load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
1310 convertInt32ToDouble(dataTempRegister
, dest
);
1313 void convertInt64ToDouble(RegisterID src
, FPRegisterID dest
)
1315 m_assembler
.scvtf
<64, 64>(dest
, src
);
1318 void divDouble(FPRegisterID src
, FPRegisterID dest
)
1320 divDouble(dest
, src
, dest
);
1323 void divDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1325 m_assembler
.fdiv
<64>(dest
, op1
, op2
);
1328 void loadDouble(ImplicitAddress address
, FPRegisterID dest
)
1330 if (tryLoadWithOffset
<64>(dest
, address
.base
, address
.offset
))
1333 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1334 m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
);
1337 void loadDouble(BaseIndex address
, FPRegisterID dest
)
1339 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 3)) {
1340 m_assembler
.ldr
<64>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1344 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1345 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1346 m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
);
1349 void loadDouble(TrustedImmPtr address
, FPRegisterID dest
)
1351 moveToCachedReg(address
, m_cachedMemoryTempRegister
);
1352 m_assembler
.ldr
<64>(dest
, memoryTempRegister
, ARM64Registers::zr
);
1355 void loadFloat(BaseIndex address
, FPRegisterID dest
)
1357 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 2)) {
1358 m_assembler
.ldr
<32>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1362 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1363 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1364 m_assembler
.ldr
<32>(dest
, address
.base
, memoryTempRegister
);
1367 void moveDouble(FPRegisterID src
, FPRegisterID dest
)
1369 m_assembler
.fmov
<64>(dest
, src
);
1372 void moveDoubleTo64(FPRegisterID src
, RegisterID dest
)
1374 m_assembler
.fmov
<64>(dest
, src
);
1377 void move64ToDouble(RegisterID src
, FPRegisterID dest
)
1379 m_assembler
.fmov
<64>(dest
, src
);
1382 void mulDouble(FPRegisterID src
, FPRegisterID dest
)
1384 mulDouble(dest
, src
, dest
);
1387 void mulDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1389 m_assembler
.fmul
<64>(dest
, op1
, op2
);
1392 void mulDouble(Address src
, FPRegisterID dest
)
1394 loadDouble(src
, fpTempRegister
);
1395 mulDouble(fpTempRegister
, dest
);
1398 void negateDouble(FPRegisterID src
, FPRegisterID dest
)
1400 m_assembler
.fneg
<64>(dest
, src
);
1403 void sqrtDouble(FPRegisterID src
, FPRegisterID dest
)
1405 m_assembler
.fsqrt
<64>(dest
, src
);
1408 void storeDouble(FPRegisterID src
, ImplicitAddress address
)
1410 if (tryStoreWithOffset
<64>(src
, address
.base
, address
.offset
))
1413 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1414 m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
);
1417 void storeDouble(FPRegisterID src
, TrustedImmPtr address
)
1419 moveToCachedReg(address
, m_cachedMemoryTempRegister
);
1420 m_assembler
.str
<64>(src
, memoryTempRegister
, ARM64Registers::zr
);
1423 void storeDouble(FPRegisterID src
, BaseIndex address
)
1425 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 3)) {
1426 m_assembler
.str
<64>(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1430 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1431 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1432 m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
);
1435 void storeFloat(FPRegisterID src
, BaseIndex address
)
1437 if (!address
.offset
&& (!address
.scale
|| address
.scale
== 2)) {
1438 m_assembler
.str
<32>(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1442 signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate());
1443 m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
);
1444 m_assembler
.str
<32>(src
, address
.base
, memoryTempRegister
);
1447 void subDouble(FPRegisterID src
, FPRegisterID dest
)
1449 subDouble(dest
, src
, dest
);
1452 void subDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1454 m_assembler
.fsub
<64>(dest
, op1
, op2
);
1457 void subDouble(Address src
, FPRegisterID dest
)
1459 loadDouble(src
, fpTempRegister
);
1460 subDouble(fpTempRegister
, dest
);
1463 // Result is undefined if the value is outside of the integer range.
1464 void truncateDoubleToInt32(FPRegisterID src
, RegisterID dest
)
1466 m_assembler
.fcvtzs
<32, 64>(dest
, src
);
1469 void truncateDoubleToUint32(FPRegisterID src
, RegisterID dest
)
1471 m_assembler
.fcvtzu
<32, 64>(dest
, src
);
1475 // Stack manipulation operations:
1477 // The ABI is assumed to provide a stack abstraction to memory,
1478 // containing machine word sized units of data. Push and pop
1479 // operations add and remove a single register sized unit of data
1480 // to or from the stack. These operations are not supported on
1481 // ARM64. Peek and poke operations read or write values on the
1482 // stack, without moving the current stack position. Additionally,
1483 // there are popToRestore and pushToSave operations, which are
1484 // designed just for quick-and-dirty saving and restoring of
1485 // temporary values. These operations don't claim to have any
1486 // ABI compatibility.
1488 void pop(RegisterID
) NO_RETURN_DUE_TO_CRASH
1493 void push(RegisterID
) NO_RETURN_DUE_TO_CRASH
1498 void push(Address
) NO_RETURN_DUE_TO_CRASH
1503 void push(TrustedImm32
) NO_RETURN_DUE_TO_CRASH
1508 void popPair(RegisterID dest1
, RegisterID dest2
)
1510 m_assembler
.ldp
<64>(dest1
, dest2
, ARM64Registers::sp
, PairPostIndex(16));
1513 void pushPair(RegisterID src1
, RegisterID src2
)
1515 m_assembler
.stp
<64>(src1
, src2
, ARM64Registers::sp
, PairPreIndex(-16));
1518 void popToRestore(RegisterID dest
)
1520 m_assembler
.ldr
<64>(dest
, ARM64Registers::sp
, PostIndex(16));
1523 void pushToSave(RegisterID src
)
1525 m_assembler
.str
<64>(src
, ARM64Registers::sp
, PreIndex(-16));
1528 void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm
)
1530 RegisterID reg
= dataTempRegister
;
1533 store64(reg
, stackPointerRegister
);
1534 load64(Address(stackPointerRegister
, 8), reg
);
1537 void pushToSave(Address address
)
1539 load32(address
, getCachedDataTempRegisterIDAndInvalidate());
1540 pushToSave(dataTempRegister
);
1543 void pushToSave(TrustedImm32 imm
)
1545 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
1546 pushToSave(dataTempRegister
);
1549 void popToRestore(FPRegisterID dest
)
1551 loadDouble(stackPointerRegister
, dest
);
1552 add64(TrustedImm32(16), stackPointerRegister
);
1555 void pushToSave(FPRegisterID src
)
1557 sub64(TrustedImm32(16), stackPointerRegister
);
1558 storeDouble(src
, stackPointerRegister
);
1561 static ptrdiff_t pushToSaveByteOffset() { return 16; }
1563 // Register move operations:
1565 void move(RegisterID src
, RegisterID dest
)
1568 m_assembler
.mov
<64>(dest
, src
);
1571 void move(TrustedImm32 imm
, RegisterID dest
)
1573 moveInternal
<TrustedImm32
, int32_t>(imm
, dest
);
1576 void move(TrustedImmPtr imm
, RegisterID dest
)
1578 moveInternal
<TrustedImmPtr
, intptr_t>(imm
, dest
);
1581 void move(TrustedImm64 imm
, RegisterID dest
)
1583 moveInternal
<TrustedImm64
, int64_t>(imm
, dest
);
1586 void swap(RegisterID reg1
, RegisterID reg2
)
1588 move(reg1
, getCachedDataTempRegisterIDAndInvalidate());
1590 move(dataTempRegister
, reg2
);
1593 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
1595 m_assembler
.sxtw(dest
, src
);
1598 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
1600 m_assembler
.uxtw(dest
, src
);
1604 // Forwards / external control flow operations:
1606 // This set of jump and conditional branch operations return a Jump
1607 // object which may linked at a later point, allow forwards jump,
1608 // or jumps that will require external linkage (after the code has been
1611 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1612 // respectively, for unsigned comparisons the names b, a, be, and ae are
1613 // used (representing the names 'below' and 'above').
1615 // Operands to the comparison are provided in the expected order, e.g.
1616 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1617 // treated as a signed 32bit value, is less than or equal to 5.
1619 // jz and jnz test whether the first operand is equal to zero, and take
1620 // an optional second operand of a mask under which to perform the test.
1622 Jump
branch32(RelationalCondition cond
, RegisterID left
, RegisterID right
)
1624 m_assembler
.cmp
<32>(left
, right
);
1625 return Jump(makeBranch(cond
));
1628 Jump
branch32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
)
1630 if (isUInt12(right
.m_value
))
1631 m_assembler
.cmp
<32>(left
, UInt12(right
.m_value
));
1632 else if (isUInt12(-right
.m_value
))
1633 m_assembler
.cmn
<32>(left
, UInt12(-right
.m_value
));
1635 moveToCachedReg(right
, m_dataMemoryTempRegister
);
1636 m_assembler
.cmp
<32>(left
, dataTempRegister
);
1638 return Jump(makeBranch(cond
));
1641 Jump
branch32(RelationalCondition cond
, RegisterID left
, Address right
)
1643 load32(right
, getCachedMemoryTempRegisterIDAndInvalidate());
1644 return branch32(cond
, left
, memoryTempRegister
);
1647 Jump
branch32(RelationalCondition cond
, Address left
, RegisterID right
)
1649 load32(left
, getCachedMemoryTempRegisterIDAndInvalidate());
1650 return branch32(cond
, memoryTempRegister
, right
);
1653 Jump
branch32(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1655 load32(left
, getCachedMemoryTempRegisterIDAndInvalidate());
1656 return branch32(cond
, memoryTempRegister
, right
);
1659 Jump
branch32(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1661 load32(left
, getCachedMemoryTempRegisterIDAndInvalidate());
1662 return branch32(cond
, memoryTempRegister
, right
);
1665 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
)
1667 load32(left
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
1668 return branch32(cond
, dataTempRegister
, right
);
1671 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
1673 load32(left
.m_ptr
, getCachedMemoryTempRegisterIDAndInvalidate());
1674 return branch32(cond
, memoryTempRegister
, right
);
1677 Jump
branch64(RelationalCondition cond
, RegisterID left
, RegisterID right
)
1679 if (right
== ARM64Registers::sp
) {
1680 if (cond
== Equal
&& left
!= ARM64Registers::sp
) {
1681 // CMP can only use SP for the left argument, since we are testing for equality, the order
1682 // does not matter here.
1683 std::swap(left
, right
);
1685 move(right
, getCachedDataTempRegisterIDAndInvalidate());
1686 right
= dataTempRegister
;
1689 m_assembler
.cmp
<64>(left
, right
);
1690 return Jump(makeBranch(cond
));
1693 Jump
branch64(RelationalCondition cond
, RegisterID left
, TrustedImm64 right
)
1695 intptr_t immediate
= right
.m_value
;
1696 if (isUInt12(immediate
))
1697 m_assembler
.cmp
<64>(left
, UInt12(static_cast<int32_t>(immediate
)));
1698 else if (isUInt12(-immediate
))
1699 m_assembler
.cmn
<64>(left
, UInt12(static_cast<int32_t>(-immediate
)));
1701 moveToCachedReg(right
, m_dataMemoryTempRegister
);
1702 m_assembler
.cmp
<64>(left
, dataTempRegister
);
1704 return Jump(makeBranch(cond
));
1707 Jump
branch64(RelationalCondition cond
, RegisterID left
, Address right
)
1709 load64(right
, getCachedMemoryTempRegisterIDAndInvalidate());
1710 return branch64(cond
, left
, memoryTempRegister
);
1713 Jump
branch64(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
)
1715 load64(left
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
1716 return branch64(cond
, dataTempRegister
, right
);
1719 Jump
branch64(RelationalCondition cond
, Address left
, RegisterID right
)
1721 load64(left
, getCachedMemoryTempRegisterIDAndInvalidate());
1722 return branch64(cond
, memoryTempRegister
, right
);
1725 Jump
branch64(RelationalCondition cond
, Address left
, TrustedImm64 right
)
1727 load64(left
, getCachedMemoryTempRegisterIDAndInvalidate());
1728 return branch64(cond
, memoryTempRegister
, right
);
1731 Jump
branchPtr(RelationalCondition cond
, BaseIndex left
, RegisterID right
)
1733 load64(left
, getCachedMemoryTempRegisterIDAndInvalidate());
1734 return branch64(cond
, memoryTempRegister
, right
);
1737 Jump
branch8(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1739 ASSERT(!(0xffffff00 & right
.m_value
));
1740 load8(left
, getCachedMemoryTempRegisterIDAndInvalidate());
1741 return branch32(cond
, memoryTempRegister
, right
);
1744 Jump
branch8(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1746 ASSERT(!(0xffffff00 & right
.m_value
));
1747 load8(left
, getCachedMemoryTempRegisterIDAndInvalidate());
1748 return branch32(cond
, memoryTempRegister
, right
);
1751 Jump
branch8(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
1753 ASSERT(!(0xffffff00 & right
.m_value
));
1754 load8(left
.m_ptr
, getCachedMemoryTempRegisterIDAndInvalidate());
1755 return branch32(cond
, memoryTempRegister
, right
);
1758 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, RegisterID mask
)
1760 m_assembler
.tst
<32>(reg
, mask
);
1761 return Jump(makeBranch(cond
));
1764 void test32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
1766 if (mask
.m_value
== -1)
1767 m_assembler
.tst
<32>(reg
, reg
);
1769 bool testedWithImmediate
= false;
1770 if ((cond
== Zero
) || (cond
== NonZero
)) {
1771 LogicalImmediate logicalImm
= LogicalImmediate::create32(mask
.m_value
);
1773 if (logicalImm
.isValid()) {
1774 m_assembler
.tst
<32>(reg
, logicalImm
);
1775 testedWithImmediate
= true;
1778 if (!testedWithImmediate
) {
1779 move(mask
, getCachedDataTempRegisterIDAndInvalidate());
1780 m_assembler
.tst
<32>(reg
, dataTempRegister
);
1785 Jump
branch(ResultCondition cond
)
1787 return Jump(makeBranch(cond
));
1790 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
1792 if (mask
.m_value
== -1) {
1793 if ((cond
== Zero
) || (cond
== NonZero
))
1794 return Jump(makeCompareAndBranch
<32>(static_cast<ZeroCondition
>(cond
), reg
));
1795 m_assembler
.tst
<32>(reg
, reg
);
1796 } else if (hasOneBitSet(mask
.m_value
) && ((cond
== Zero
) || (cond
== NonZero
)))
1797 return Jump(makeTestBitAndBranch(reg
, getLSBSet(mask
.m_value
), static_cast<ZeroCondition
>(cond
)));
1799 if ((cond
== Zero
) || (cond
== NonZero
)) {
1800 LogicalImmediate logicalImm
= LogicalImmediate::create32(mask
.m_value
);
1802 if (logicalImm
.isValid()) {
1803 m_assembler
.tst
<32>(reg
, logicalImm
);
1804 return Jump(makeBranch(cond
));
1808 move(mask
, getCachedDataTempRegisterIDAndInvalidate());
1809 m_assembler
.tst
<32>(reg
, dataTempRegister
);
1811 return Jump(makeBranch(cond
));
1814 Jump
branchTest32(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1816 load32(address
, getCachedMemoryTempRegisterIDAndInvalidate());
1817 return branchTest32(cond
, memoryTempRegister
, mask
);
1820 Jump
branchTest32(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1822 load32(address
, getCachedMemoryTempRegisterIDAndInvalidate());
1823 return branchTest32(cond
, memoryTempRegister
, mask
);
1826 Jump
branchTest64(ResultCondition cond
, RegisterID reg
, RegisterID mask
)
1828 m_assembler
.tst
<64>(reg
, mask
);
1829 return Jump(makeBranch(cond
));
1832 Jump
branchTest64(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
1834 if (mask
.m_value
== -1) {
1835 if ((cond
== Zero
) || (cond
== NonZero
))
1836 return Jump(makeCompareAndBranch
<64>(static_cast<ZeroCondition
>(cond
), reg
));
1837 m_assembler
.tst
<64>(reg
, reg
);
1838 } else if (hasOneBitSet(mask
.m_value
) && ((cond
== Zero
) || (cond
== NonZero
)))
1839 return Jump(makeTestBitAndBranch(reg
, getLSBSet(mask
.m_value
), static_cast<ZeroCondition
>(cond
)));
1841 if ((cond
== Zero
) || (cond
== NonZero
)) {
1842 LogicalImmediate logicalImm
= LogicalImmediate::create64(mask
.m_value
);
1844 if (logicalImm
.isValid()) {
1845 m_assembler
.tst
<64>(reg
, logicalImm
);
1846 return Jump(makeBranch(cond
));
1850 signExtend32ToPtr(mask
, getCachedDataTempRegisterIDAndInvalidate());
1851 m_assembler
.tst
<64>(reg
, dataTempRegister
);
1853 return Jump(makeBranch(cond
));
1856 Jump
branchTest64(ResultCondition cond
, Address address
, RegisterID mask
)
1858 load64(address
, getCachedDataTempRegisterIDAndInvalidate());
1859 return branchTest64(cond
, dataTempRegister
, mask
);
1862 Jump
branchTest64(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1864 load64(address
, getCachedDataTempRegisterIDAndInvalidate());
1865 return branchTest64(cond
, dataTempRegister
, mask
);
1868 Jump
branchTest64(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1870 load64(address
, getCachedDataTempRegisterIDAndInvalidate());
1871 return branchTest64(cond
, dataTempRegister
, mask
);
1874 Jump
branchTest64(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
1876 load64(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
1877 return branchTest64(cond
, dataTempRegister
, mask
);
1880 Jump
branchTest8(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1882 load8(address
, getCachedDataTempRegisterIDAndInvalidate());
1883 return branchTest32(cond
, dataTempRegister
, mask
);
1886 Jump
branchTest8(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
1888 load8(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
1889 return branchTest32(cond
, dataTempRegister
, mask
);
1892 Jump
branchTest8(ResultCondition cond
, ExtendedAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
1894 move(TrustedImmPtr(reinterpret_cast<void*>(address
.offset
)), getCachedDataTempRegisterIDAndInvalidate());
1895 m_assembler
.ldrb(dataTempRegister
, address
.base
, dataTempRegister
);
1896 return branchTest32(cond
, dataTempRegister
, mask
);
1899 Jump
branchTest8(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1901 load8(address
, getCachedDataTempRegisterIDAndInvalidate());
1902 return branchTest32(cond
, dataTempRegister
, mask
);
1905 Jump
branch32WithUnalignedHalfWords(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1907 return branch32(cond
, left
, right
);
1911 // Arithmetic control flow operations:
1913 // This set of conditional branch operations branch based
1914 // on the result of an arithmetic operation. The operation
1915 // is performed as normal, storing the result.
1917 // * jz operations branch if the result is zero.
1918 // * jo operations branch if the (signed) arithmetic
1919 // operation caused an overflow to occur.
1921 Jump
branchAdd32(ResultCondition cond
, RegisterID op1
, RegisterID op2
, RegisterID dest
)
1923 m_assembler
.add
<32, S
>(dest
, op1
, op2
);
1924 return Jump(makeBranch(cond
));
1927 Jump
branchAdd32(ResultCondition cond
, RegisterID op1
, TrustedImm32 imm
, RegisterID dest
)
1929 if (isUInt12(imm
.m_value
)) {
1930 m_assembler
.add
<32, S
>(dest
, op1
, UInt12(imm
.m_value
));
1931 return Jump(makeBranch(cond
));
1933 if (isUInt12(-imm
.m_value
)) {
1934 m_assembler
.sub
<32, S
>(dest
, op1
, UInt12(-imm
.m_value
));
1935 return Jump(makeBranch(cond
));
1938 signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate());
1939 return branchAdd32(cond
, op1
, dataTempRegister
, dest
);
1942 Jump
branchAdd32(ResultCondition cond
, Address src
, RegisterID dest
)
1944 load32(src
, getCachedDataTempRegisterIDAndInvalidate());
1945 return branchAdd32(cond
, dest
, dataTempRegister
, dest
);
1948 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
1950 return branchAdd32(cond
, dest
, src
, dest
);
1953 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
1955 return branchAdd32(cond
, dest
, imm
, dest
);
1958 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, AbsoluteAddress address
)
1960 load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate());
1962 if (isUInt12(imm
.m_value
)) {
1963 m_assembler
.add
<32, S
>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
));
1964 store32(dataTempRegister
, address
.m_ptr
);
1965 } else if (isUInt12(-imm
.m_value
)) {
1966 m_assembler
.sub
<32, S
>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
));
1967 store32(dataTempRegister
, address
.m_ptr
);
1969 move(imm
, getCachedMemoryTempRegisterIDAndInvalidate());
1970 m_assembler
.add
<32, S
>(dataTempRegister
, dataTempRegister
, memoryTempRegister
);
1971 store32(dataTempRegister
, address
.m_ptr
);
1974 return Jump(makeBranch(cond
));
1977 Jump
branchAdd64(ResultCondition cond
, RegisterID op1
, RegisterID op2
, RegisterID dest
)
1979 m_assembler
.add
<64, S
>(dest
, op1
, op2
);
1980 return Jump(makeBranch(cond
));
1983 Jump
branchAdd64(ResultCondition cond
, RegisterID op1
, TrustedImm32 imm
, RegisterID dest
)
1985 if (isUInt12(imm
.m_value
)) {
1986 m_assembler
.add
<64, S
>(dest
, op1
, UInt12(imm
.m_value
));
1987 return Jump(makeBranch(cond
));
1989 if (isUInt12(-imm
.m_value
)) {
1990 m_assembler
.sub
<64, S
>(dest
, op1
, UInt12(-imm
.m_value
));
1991 return Jump(makeBranch(cond
));
1994 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
1995 return branchAdd64(cond
, op1
, dataTempRegister
, dest
);
1998 Jump
branchAdd64(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2000 return branchAdd64(cond
, dest
, src
, dest
);
2003 Jump
branchAdd64(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
2005 return branchAdd64(cond
, dest
, imm
, dest
);
2008 Jump
branchMul32(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
2010 ASSERT(cond
!= Signed
);
2012 if (cond
!= Overflow
) {
2013 m_assembler
.mul
<32>(dest
, src1
, src2
);
2014 return branchTest32(cond
, dest
);
2017 // This is a signed multiple of two 32-bit values, producing a 64-bit result.
2018 m_assembler
.smull(dest
, src1
, src2
);
2019 // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
2020 m_assembler
.asr
<64>(getCachedDataTempRegisterIDAndInvalidate(), dest
, 32);
2021 // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
2022 m_assembler
.asr
<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest
, 31);
2023 // After a mul32 the top 32 bits of the register should be clear.
2024 zeroExtend32ToPtr(dest
, dest
);
2025 // Check that bits 31..63 of the original result were all equal.
2026 return branch32(NotEqual
, memoryTempRegister
, dataTempRegister
);
2029 Jump
branchMul32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2031 return branchMul32(cond
, dest
, src
, dest
);
2034 Jump
branchMul32(ResultCondition cond
, TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
2036 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
2037 return branchMul32(cond
, dataTempRegister
, src
, dest
);
2040 Jump
branchMul64(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
2042 ASSERT(cond
!= Signed
);
2044 // This is a signed multiple of two 64-bit values, producing a 64-bit result.
2045 m_assembler
.mul
<64>(dest
, src1
, src2
);
2047 if (cond
!= Overflow
)
2048 return branchTest64(cond
, dest
);
2050 // Compute bits 127..64 of the result into dataTempRegister.
2051 m_assembler
.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1
, src2
);
2052 // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
2053 m_assembler
.asr
<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest
, 63);
2054 // Check that bits 31..63 of the original result were all equal.
2055 return branch64(NotEqual
, memoryTempRegister
, dataTempRegister
);
2058 Jump
branchMul64(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2060 return branchMul64(cond
, dest
, src
, dest
);
2063 Jump
branchNeg32(ResultCondition cond
, RegisterID dest
)
2065 m_assembler
.neg
<32, S
>(dest
, dest
);
2066 return Jump(makeBranch(cond
));
2069 Jump
branchNeg64(ResultCondition cond
, RegisterID srcDest
)
2071 m_assembler
.neg
<64, S
>(srcDest
, srcDest
);
2072 return Jump(makeBranch(cond
));
2075 Jump
branchSub32(ResultCondition cond
, RegisterID dest
)
2077 m_assembler
.neg
<32, S
>(dest
, dest
);
2078 return Jump(makeBranch(cond
));
2081 Jump
branchSub32(ResultCondition cond
, RegisterID op1
, RegisterID op2
, RegisterID dest
)
2083 m_assembler
.sub
<32, S
>(dest
, op1
, op2
);
2084 return Jump(makeBranch(cond
));
2087 Jump
branchSub32(ResultCondition cond
, RegisterID op1
, TrustedImm32 imm
, RegisterID dest
)
2089 if (isUInt12(imm
.m_value
)) {
2090 m_assembler
.sub
<32, S
>(dest
, op1
, UInt12(imm
.m_value
));
2091 return Jump(makeBranch(cond
));
2093 if (isUInt12(-imm
.m_value
)) {
2094 m_assembler
.add
<32, S
>(dest
, op1
, UInt12(-imm
.m_value
));
2095 return Jump(makeBranch(cond
));
2098 signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate());
2099 return branchSub32(cond
, op1
, dataTempRegister
, dest
);
2102 Jump
branchSub32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2104 return branchSub32(cond
, dest
, src
, dest
);
2107 Jump
branchSub32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
2109 return branchSub32(cond
, dest
, imm
, dest
);
2112 Jump
branchSub64(ResultCondition cond
, RegisterID op1
, RegisterID op2
, RegisterID dest
)
2114 m_assembler
.sub
<64, S
>(dest
, op1
, op2
);
2115 return Jump(makeBranch(cond
));
2118 Jump
branchSub64(ResultCondition cond
, RegisterID op1
, TrustedImm32 imm
, RegisterID dest
)
2120 if (isUInt12(imm
.m_value
)) {
2121 m_assembler
.sub
<64, S
>(dest
, op1
, UInt12(imm
.m_value
));
2122 return Jump(makeBranch(cond
));
2124 if (isUInt12(-imm
.m_value
)) {
2125 m_assembler
.add
<64, S
>(dest
, op1
, UInt12(-imm
.m_value
));
2126 return Jump(makeBranch(cond
));
2129 move(imm
, getCachedDataTempRegisterIDAndInvalidate());
2130 return branchSub64(cond
, op1
, dataTempRegister
, dest
);
2133 Jump
branchSub64(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2135 return branchSub64(cond
, dest
, src
, dest
);
2138 Jump
branchSub64(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
2140 return branchSub64(cond
, dest
, imm
, dest
);
2144 // Jumps, calls, returns
2146 ALWAYS_INLINE Call
call()
2148 AssemblerLabel pointerLabel
= m_assembler
.label();
2149 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2150 invalidateAllTempRegisters();
2151 m_assembler
.blr(dataTempRegister
);
2152 AssemblerLabel callLabel
= m_assembler
.label();
2153 ASSERT_UNUSED(pointerLabel
, ARM64Assembler::getDifferenceBetweenLabels(callLabel
, pointerLabel
) == REPATCH_OFFSET_CALL_TO_POINTER
);
2154 return Call(callLabel
, Call::Linkable
);
2157 ALWAYS_INLINE Call
call(RegisterID target
)
2159 invalidateAllTempRegisters();
2160 m_assembler
.blr(target
);
2161 return Call(m_assembler
.label(), Call::None
);
2164 ALWAYS_INLINE Call
call(Address address
)
2166 load64(address
, getCachedDataTempRegisterIDAndInvalidate());
2167 return call(dataTempRegister
);
2170 ALWAYS_INLINE Jump
jump()
2172 AssemblerLabel label
= m_assembler
.label();
2174 return Jump(label
, m_makeJumpPatchable
? ARM64Assembler::JumpNoConditionFixedSize
: ARM64Assembler::JumpNoCondition
);
2177 void jump(RegisterID target
)
2179 m_assembler
.br(target
);
2182 void jump(Address address
)
2184 load64(address
, getCachedDataTempRegisterIDAndInvalidate());
2185 m_assembler
.br(dataTempRegister
);
2188 void jump(AbsoluteAddress address
)
2190 move(TrustedImmPtr(address
.m_ptr
), getCachedDataTempRegisterIDAndInvalidate());
2191 load64(Address(dataTempRegister
), dataTempRegister
);
2192 m_assembler
.br(dataTempRegister
);
2195 ALWAYS_INLINE Call
makeTailRecursiveCall(Jump oldJump
)
2198 return tailRecursiveCall();
2201 ALWAYS_INLINE Call
nearCall()
2204 return Call(m_assembler
.label(), Call::LinkableNear
);
2207 ALWAYS_INLINE
void ret()
2212 ALWAYS_INLINE Call
tailRecursiveCall()
2214 // Like a normal call, but don't link.
2215 AssemblerLabel pointerLabel
= m_assembler
.label();
2216 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2217 m_assembler
.br(dataTempRegister
);
2218 AssemblerLabel callLabel
= m_assembler
.label();
2219 ASSERT_UNUSED(pointerLabel
, ARM64Assembler::getDifferenceBetweenLabels(callLabel
, pointerLabel
) == REPATCH_OFFSET_CALL_TO_POINTER
);
2220 return Call(callLabel
, Call::Linkable
);
2224 // Comparisons operations
2226 void compare32(RelationalCondition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
2228 m_assembler
.cmp
<32>(left
, right
);
2229 m_assembler
.cset
<32>(dest
, ARM64Condition(cond
));
2232 void compare32(RelationalCondition cond
, Address left
, RegisterID right
, RegisterID dest
)
2234 load32(left
, getCachedDataTempRegisterIDAndInvalidate());
2235 m_assembler
.cmp
<32>(dataTempRegister
, right
);
2236 m_assembler
.cset
<32>(dest
, ARM64Condition(cond
));
2239 void compare32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
, RegisterID dest
)
2241 move(right
, getCachedDataTempRegisterIDAndInvalidate());
2242 m_assembler
.cmp
<32>(left
, dataTempRegister
);
2243 m_assembler
.cset
<32>(dest
, ARM64Condition(cond
));
2246 void compare64(RelationalCondition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
2248 m_assembler
.cmp
<64>(left
, right
);
2249 m_assembler
.cset
<32>(dest
, ARM64Condition(cond
));
2252 void compare64(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
, RegisterID dest
)
2254 signExtend32ToPtr(right
, getCachedDataTempRegisterIDAndInvalidate());
2255 m_assembler
.cmp
<64>(left
, dataTempRegister
);
2256 m_assembler
.cset
<32>(dest
, ARM64Condition(cond
));
2259 void compare8(RelationalCondition cond
, Address left
, TrustedImm32 right
, RegisterID dest
)
2261 load8(left
, getCachedMemoryTempRegisterIDAndInvalidate());
2262 move(right
, getCachedDataTempRegisterIDAndInvalidate());
2263 compare32(cond
, memoryTempRegister
, dataTempRegister
, dest
);
2266 void test32(ResultCondition cond
, RegisterID src
, TrustedImm32 mask
, RegisterID dest
)
2268 if (mask
.m_value
== -1)
2269 m_assembler
.tst
<32>(src
, src
);
2271 signExtend32ToPtr(mask
, getCachedDataTempRegisterIDAndInvalidate());
2272 m_assembler
.tst
<32>(src
, dataTempRegister
);
2274 m_assembler
.cset
<32>(dest
, ARM64Condition(cond
));
2277 void test32(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
2279 load32(address
, getCachedDataTempRegisterIDAndInvalidate());
2280 test32(cond
, dataTempRegister
, mask
, dest
);
2283 void test8(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
2285 load8(address
, getCachedDataTempRegisterIDAndInvalidate());
2286 test32(cond
, dataTempRegister
, mask
, dest
);
2289 void test64(ResultCondition cond
, RegisterID op1
, RegisterID op2
, RegisterID dest
)
2291 m_assembler
.tst
<64>(op1
, op2
);
2292 m_assembler
.cset
<32>(dest
, ARM64Condition(cond
));
2295 void test64(ResultCondition cond
, RegisterID src
, TrustedImm32 mask
, RegisterID dest
)
2297 if (mask
.m_value
== -1)
2298 m_assembler
.tst
<64>(src
, src
);
2300 signExtend32ToPtr(mask
, getCachedDataTempRegisterIDAndInvalidate());
2301 m_assembler
.tst
<64>(src
, dataTempRegister
);
2303 m_assembler
.cset
<32>(dest
, ARM64Condition(cond
));
2307 // Patchable operations
2309 ALWAYS_INLINE DataLabel32
moveWithPatch(TrustedImm32 imm
, RegisterID dest
)
2311 DataLabel32
label(this);
2312 moveWithFixedWidth(imm
, dest
);
2316 ALWAYS_INLINE DataLabelPtr
moveWithPatch(TrustedImmPtr imm
, RegisterID dest
)
2318 DataLabelPtr
label(this);
2319 moveWithFixedWidth(imm
, dest
);
2323 ALWAYS_INLINE Jump
branchPtrWithPatch(RelationalCondition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
2325 dataLabel
= DataLabelPtr(this);
2326 moveWithPatch(initialRightValue
, getCachedDataTempRegisterIDAndInvalidate());
2327 return branch64(cond
, left
, dataTempRegister
);
2330 ALWAYS_INLINE Jump
branchPtrWithPatch(RelationalCondition cond
, Address left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
2332 dataLabel
= DataLabelPtr(this);
2333 moveWithPatch(initialRightValue
, getCachedDataTempRegisterIDAndInvalidate());
2334 return branch64(cond
, left
, dataTempRegister
);
2337 ALWAYS_INLINE Jump
branch32WithPatch(RelationalCondition cond
, Address left
, DataLabel32
& dataLabel
, TrustedImm32 initialRightValue
= TrustedImm32(0))
2339 dataLabel
= DataLabel32(this);
2340 moveWithPatch(initialRightValue
, getCachedDataTempRegisterIDAndInvalidate());
2341 return branch32(cond
, left
, dataTempRegister
);
2344 PatchableJump
patchableBranchPtr(RelationalCondition cond
, Address left
, TrustedImmPtr right
= TrustedImmPtr(0))
2346 m_makeJumpPatchable
= true;
2347 Jump result
= branch32(cond
, left
, TrustedImm32(right
));
2348 m_makeJumpPatchable
= false;
2349 return PatchableJump(result
);
2352 PatchableJump
patchableBranchTest32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
2354 m_makeJumpPatchable
= true;
2355 Jump result
= branchTest32(cond
, reg
, mask
);
2356 m_makeJumpPatchable
= false;
2357 return PatchableJump(result
);
2360 PatchableJump
patchableBranch32(RelationalCondition cond
, RegisterID reg
, TrustedImm32 imm
)
2362 m_makeJumpPatchable
= true;
2363 Jump result
= branch32(cond
, reg
, imm
);
2364 m_makeJumpPatchable
= false;
2365 return PatchableJump(result
);
2368 PatchableJump
patchableBranchPtrWithPatch(RelationalCondition cond
, Address left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
2370 m_makeJumpPatchable
= true;
2371 Jump result
= branchPtrWithPatch(cond
, left
, dataLabel
, initialRightValue
);
2372 m_makeJumpPatchable
= false;
2373 return PatchableJump(result
);
2376 PatchableJump
patchableBranch32WithPatch(RelationalCondition cond
, Address left
, DataLabel32
& dataLabel
, TrustedImm32 initialRightValue
= TrustedImm32(0))
2378 m_makeJumpPatchable
= true;
2379 Jump result
= branch32WithPatch(cond
, left
, dataLabel
, initialRightValue
);
2380 m_makeJumpPatchable
= false;
2381 return PatchableJump(result
);
2384 PatchableJump
patchableJump()
2386 m_makeJumpPatchable
= true;
2387 Jump result
= jump();
2388 m_makeJumpPatchable
= false;
2389 return PatchableJump(result
);
2392 ALWAYS_INLINE DataLabelPtr
storePtrWithPatch(TrustedImmPtr initialValue
, ImplicitAddress address
)
2394 DataLabelPtr
label(this);
2395 moveWithFixedWidth(initialValue
, getCachedDataTempRegisterIDAndInvalidate());
2396 store64(dataTempRegister
, address
);
2400 ALWAYS_INLINE DataLabelPtr
storePtrWithPatch(ImplicitAddress address
)
2402 return storePtrWithPatch(TrustedImmPtr(0), address
);
2405 static void reemitInitialMoveWithPatch(void* address
, void* value
)
2407 ARM64Assembler::setPointer(static_cast<int*>(address
), value
, dataTempRegister
, true);
2410 // Miscellaneous operations:
2412 void breakpoint(uint16_t imm
= 0)
2414 m_assembler
.brk(imm
);
2424 m_assembler
.dmbSY();
2428 // Misc helper functions.
2430 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
2431 static RelationalCondition
invert(RelationalCondition cond
)
2433 return static_cast<RelationalCondition
>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition
>(cond
)));
2436 static FunctionPtr
readCallTarget(CodeLocationCall call
)
2438 return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call
.dataLocation())));
2441 static void replaceWithJump(CodeLocationLabel instructionStart
, CodeLocationLabel destination
)
2443 ARM64Assembler::replaceWithJump(instructionStart
.dataLocation(), destination
.dataLocation());
2446 static ptrdiff_t maxJumpReplacementSize()
2448 return ARM64Assembler::maxJumpReplacementSize();
2451 RegisterID
scratchRegisterForBlinding()
2453 // We *do not* have a scratch register for blinding.
2454 RELEASE_ASSERT_NOT_REACHED();
2455 return getCachedDataTempRegisterIDAndInvalidate();
2458 static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
2459 static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
2461 static CodeLocationLabel
startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label
)
2463 return label
.labelAtOffset(0);
2466 static CodeLocationLabel
startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr
)
2468 UNREACHABLE_FOR_PLATFORM();
2469 return CodeLocationLabel();
2472 static CodeLocationLabel
startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32
)
2474 UNREACHABLE_FOR_PLATFORM();
2475 return CodeLocationLabel();
2478 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart
, RegisterID
, void* initialValue
)
2480 reemitInitialMoveWithPatch(instructionStart
.dataLocation(), initialValue
);
2483 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel
, Address
, void*)
2485 UNREACHABLE_FOR_PLATFORM();
2488 static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel
, Address
, int32_t)
2490 UNREACHABLE_FOR_PLATFORM();
2494 ALWAYS_INLINE Jump
makeBranch(ARM64Assembler::Condition cond
)
2496 m_assembler
.b_cond(cond
);
2497 AssemblerLabel label
= m_assembler
.label();
2499 return Jump(label
, m_makeJumpPatchable
? ARM64Assembler::JumpConditionFixedSize
: ARM64Assembler::JumpCondition
, cond
);
2501 ALWAYS_INLINE Jump
makeBranch(RelationalCondition cond
) { return makeBranch(ARM64Condition(cond
)); }
2502 ALWAYS_INLINE Jump
makeBranch(ResultCondition cond
) { return makeBranch(ARM64Condition(cond
)); }
2503 ALWAYS_INLINE Jump
makeBranch(DoubleCondition cond
) { return makeBranch(ARM64Condition(cond
)); }
2505 template <int dataSize
>
2506 ALWAYS_INLINE Jump
makeCompareAndBranch(ZeroCondition cond
, RegisterID reg
)
2509 m_assembler
.cbz
<dataSize
>(reg
);
2511 m_assembler
.cbnz
<dataSize
>(reg
);
2512 AssemblerLabel label
= m_assembler
.label();
2514 return Jump(label
, m_makeJumpPatchable
? ARM64Assembler::JumpCompareAndBranchFixedSize
: ARM64Assembler::JumpCompareAndBranch
, static_cast<ARM64Assembler::Condition
>(cond
), dataSize
== 64, reg
);
2517 ALWAYS_INLINE Jump
makeTestBitAndBranch(RegisterID reg
, unsigned bit
, ZeroCondition cond
)
2522 m_assembler
.tbz(reg
, bit
);
2524 m_assembler
.tbnz(reg
, bit
);
2525 AssemblerLabel label
= m_assembler
.label();
2527 return Jump(label
, m_makeJumpPatchable
? ARM64Assembler::JumpTestBitFixedSize
: ARM64Assembler::JumpTestBit
, static_cast<ARM64Assembler::Condition
>(cond
), bit
, reg
);
2530 ARM64Assembler::Condition
ARM64Condition(RelationalCondition cond
)
2532 return static_cast<ARM64Assembler::Condition
>(cond
);
2535 ARM64Assembler::Condition
ARM64Condition(ResultCondition cond
)
2537 return static_cast<ARM64Assembler::Condition
>(cond
);
2540 ARM64Assembler::Condition
ARM64Condition(DoubleCondition cond
)
2542 return static_cast<ARM64Assembler::Condition
>(cond
);
2546 ALWAYS_INLINE RegisterID
getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister
.registerIDInvalidate(); }
2547 ALWAYS_INLINE RegisterID
getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister
.registerIDInvalidate(); }
2549 ALWAYS_INLINE
bool isInIntRange(intptr_t value
)
2551 return value
== ((value
<< 32) >> 32);
2554 template<typename ImmediateType
, typename rawType
>
2555 void moveInternal(ImmediateType imm
, RegisterID dest
)
2557 const int dataSize
= sizeof(rawType
) * 8;
2558 const int numberHalfWords
= dataSize
/ 16;
2559 rawType value
= bitwise_cast
<rawType
>(imm
.m_value
);
2560 uint16_t halfword
[numberHalfWords
];
2562 // Handle 0 and ~0 here to simplify code below
2564 m_assembler
.movz
<dataSize
>(dest
, 0);
2568 m_assembler
.movn
<dataSize
>(dest
, 0);
2572 LogicalImmediate logicalImm
= dataSize
== 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value
)) : LogicalImmediate::create32(static_cast<uint32_t>(value
));
2574 if (logicalImm
.isValid()) {
2575 m_assembler
.movi
<dataSize
>(dest
, logicalImm
);
2579 // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
2580 int zeroOrNegateVote
= 0;
2581 for (int i
= 0; i
< numberHalfWords
; ++i
) {
2582 halfword
[i
] = getHalfword(value
, i
);
2585 else if (halfword
[i
] == 0xffff)
2589 bool needToClearRegister
= true;
2590 if (zeroOrNegateVote
>= 0) {
2591 for (int i
= 0; i
< numberHalfWords
; i
++) {
2593 if (needToClearRegister
) {
2594 m_assembler
.movz
<dataSize
>(dest
, halfword
[i
], 16*i
);
2595 needToClearRegister
= false;
2597 m_assembler
.movk
<dataSize
>(dest
, halfword
[i
], 16*i
);
2601 for (int i
= 0; i
< numberHalfWords
; i
++) {
2602 if (halfword
[i
] != 0xffff) {
2603 if (needToClearRegister
) {
2604 m_assembler
.movn
<dataSize
>(dest
, ~halfword
[i
], 16*i
);
2605 needToClearRegister
= false;
2607 m_assembler
.movk
<dataSize
>(dest
, halfword
[i
], 16*i
);
2613 template<int datasize
>
2614 ALWAYS_INLINE
void loadUnsignedImmediate(RegisterID rt
, RegisterID rn
, unsigned pimm
)
2616 m_assembler
.ldr
<datasize
>(rt
, rn
, pimm
);
2619 template<int datasize
>
2620 ALWAYS_INLINE
void loadUnscaledImmediate(RegisterID rt
, RegisterID rn
, int simm
)
2622 m_assembler
.ldur
<datasize
>(rt
, rn
, simm
);
2625 template<int datasize
>
2626 ALWAYS_INLINE
void storeUnsignedImmediate(RegisterID rt
, RegisterID rn
, unsigned pimm
)
2628 m_assembler
.str
<datasize
>(rt
, rn
, pimm
);
2631 template<int datasize
>
2632 ALWAYS_INLINE
void storeUnscaledImmediate(RegisterID rt
, RegisterID rn
, int simm
)
2634 m_assembler
.stur
<datasize
>(rt
, rn
, simm
);
2637 void moveWithFixedWidth(TrustedImm32 imm
, RegisterID dest
)
2639 int32_t value
= imm
.m_value
;
2640 m_assembler
.movz
<32>(dest
, getHalfword(value
, 0));
2641 m_assembler
.movk
<32>(dest
, getHalfword(value
, 1), 16);
2644 void moveWithFixedWidth(TrustedImmPtr imm
, RegisterID dest
)
2646 intptr_t value
= reinterpret_cast<intptr_t>(imm
.m_value
);
2647 m_assembler
.movz
<64>(dest
, getHalfword(value
, 0));
2648 m_assembler
.movk
<64>(dest
, getHalfword(value
, 1), 16);
2649 m_assembler
.movk
<64>(dest
, getHalfword(value
, 2), 32);
2652 void signExtend32ToPtrWithFixedWidth(int32_t value
, RegisterID dest
)
2655 m_assembler
.movz
<32>(dest
, getHalfword(value
, 0));
2656 m_assembler
.movk
<32>(dest
, getHalfword(value
, 1), 16);
2658 m_assembler
.movn
<32>(dest
, ~getHalfword(value
, 0));
2659 m_assembler
.movk
<32>(dest
, getHalfword(value
, 1), 16);
2663 void signExtend32ToPtr(TrustedImm32 imm
, RegisterID dest
)
2665 move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm
.m_value
))), dest
);
2668 template<int datasize
>
2669 ALWAYS_INLINE
void load(const void* address
, RegisterID dest
)
2671 intptr_t currentRegisterContents
;
2672 if (m_cachedMemoryTempRegister
.value(currentRegisterContents
)) {
2673 intptr_t addressAsInt
= reinterpret_cast<intptr_t>(address
);
2674 intptr_t addressDelta
= addressAsInt
- currentRegisterContents
;
2676 if (dest
== memoryTempRegister
)
2677 m_cachedMemoryTempRegister
.invalidate();
2679 if (isInIntRange(addressDelta
)) {
2680 if (ARM64Assembler::canEncodeSImmOffset(addressDelta
)) {
2681 m_assembler
.ldur
<datasize
>(dest
, memoryTempRegister
, addressDelta
);
2685 if (ARM64Assembler::canEncodePImmOffset
<datasize
>(addressDelta
)) {
2686 m_assembler
.ldr
<datasize
>(dest
, memoryTempRegister
, addressDelta
);
2691 if ((addressAsInt
& (~maskHalfWord0
)) == (currentRegisterContents
& (~maskHalfWord0
))) {
2692 m_assembler
.movk
<64>(memoryTempRegister
, addressAsInt
& maskHalfWord0
, 0);
2693 m_cachedMemoryTempRegister
.setValue(reinterpret_cast<intptr_t>(address
));
2694 m_assembler
.ldr
<datasize
>(dest
, memoryTempRegister
, ARM64Registers::zr
);
2699 move(TrustedImmPtr(address
), memoryTempRegister
);
2700 if (dest
== memoryTempRegister
)
2701 m_cachedMemoryTempRegister
.invalidate();
2703 m_cachedMemoryTempRegister
.setValue(reinterpret_cast<intptr_t>(address
));
2704 m_assembler
.ldr
<datasize
>(dest
, memoryTempRegister
, ARM64Registers::zr
);
2707 template<int datasize
>
2708 ALWAYS_INLINE
void store(RegisterID src
, const void* address
)
2710 intptr_t currentRegisterContents
;
2711 if (m_cachedMemoryTempRegister
.value(currentRegisterContents
)) {
2712 intptr_t addressAsInt
= reinterpret_cast<intptr_t>(address
);
2713 intptr_t addressDelta
= addressAsInt
- currentRegisterContents
;
2715 if (isInIntRange(addressDelta
)) {
2716 if (ARM64Assembler::canEncodeSImmOffset(addressDelta
)) {
2717 m_assembler
.stur
<datasize
>(src
, memoryTempRegister
, addressDelta
);
2721 if (ARM64Assembler::canEncodePImmOffset
<datasize
>(addressDelta
)) {
2722 m_assembler
.str
<datasize
>(src
, memoryTempRegister
, addressDelta
);
2727 if ((addressAsInt
& (~maskHalfWord0
)) == (currentRegisterContents
& (~maskHalfWord0
))) {
2728 m_assembler
.movk
<64>(memoryTempRegister
, addressAsInt
& maskHalfWord0
, 0);
2729 m_cachedMemoryTempRegister
.setValue(reinterpret_cast<intptr_t>(address
));
2730 m_assembler
.str
<datasize
>(src
, memoryTempRegister
, ARM64Registers::zr
);
2735 move(TrustedImmPtr(address
), memoryTempRegister
);
2736 m_cachedMemoryTempRegister
.setValue(reinterpret_cast<intptr_t>(address
));
2737 m_assembler
.str
<datasize
>(src
, memoryTempRegister
, ARM64Registers::zr
);
2740 template <int dataSize
>
2741 ALWAYS_INLINE
bool tryMoveUsingCacheRegisterContents(intptr_t immediate
, CachedTempRegister
& dest
)
2743 intptr_t currentRegisterContents
;
2744 if (dest
.value(currentRegisterContents
)) {
2745 if (currentRegisterContents
== immediate
)
2748 LogicalImmediate logicalImm
= dataSize
== 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate
)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate
));
2750 if (logicalImm
.isValid()) {
2751 m_assembler
.movi
<dataSize
>(dest
.registerIDNoInvalidate(), logicalImm
);
2752 dest
.setValue(immediate
);
2756 if ((immediate
& maskUpperWord
) == (currentRegisterContents
& maskUpperWord
)) {
2757 if ((immediate
& maskHalfWord1
) != (currentRegisterContents
& maskHalfWord1
))
2758 m_assembler
.movk
<dataSize
>(dest
.registerIDNoInvalidate(), (immediate
& maskHalfWord1
) >> 16, 16);
2760 if ((immediate
& maskHalfWord0
) != (currentRegisterContents
& maskHalfWord0
))
2761 m_assembler
.movk
<dataSize
>(dest
.registerIDNoInvalidate(), immediate
& maskHalfWord0
, 0);
2763 dest
.setValue(immediate
);
2771 void moveToCachedReg(TrustedImm32 imm
, CachedTempRegister
& dest
)
2773 if (tryMoveUsingCacheRegisterContents
<32>(static_cast<intptr_t>(imm
.m_value
), dest
))
2776 moveInternal
<TrustedImm32
, int32_t>(imm
, dest
.registerIDNoInvalidate());
2777 dest
.setValue(imm
.m_value
);
2780 void moveToCachedReg(TrustedImmPtr imm
, CachedTempRegister
& dest
)
2782 if (tryMoveUsingCacheRegisterContents
<64>(imm
.asIntptr(), dest
))
2785 moveInternal
<TrustedImmPtr
, intptr_t>(imm
, dest
.registerIDNoInvalidate());
2786 dest
.setValue(imm
.asIntptr());
2789 void moveToCachedReg(TrustedImm64 imm
, CachedTempRegister
& dest
)
2791 if (tryMoveUsingCacheRegisterContents
<64>(static_cast<intptr_t>(imm
.m_value
), dest
))
2794 moveInternal
<TrustedImm64
, int64_t>(imm
, dest
.registerIDNoInvalidate());
2795 dest
.setValue(imm
.m_value
);
2798 template<int datasize
>
2799 ALWAYS_INLINE
bool tryLoadWithOffset(RegisterID rt
, RegisterID rn
, int32_t offset
)
2801 if (ARM64Assembler::canEncodeSImmOffset(offset
)) {
2802 loadUnscaledImmediate
<datasize
>(rt
, rn
, offset
);
2805 if (ARM64Assembler::canEncodePImmOffset
<datasize
>(offset
)) {
2806 loadUnsignedImmediate
<datasize
>(rt
, rn
, static_cast<unsigned>(offset
));
2812 template<int datasize
>
2813 ALWAYS_INLINE
bool tryLoadWithOffset(FPRegisterID rt
, RegisterID rn
, int32_t offset
)
2815 if (ARM64Assembler::canEncodeSImmOffset(offset
)) {
2816 m_assembler
.ldur
<datasize
>(rt
, rn
, offset
);
2819 if (ARM64Assembler::canEncodePImmOffset
<datasize
>(offset
)) {
2820 m_assembler
.ldr
<datasize
>(rt
, rn
, static_cast<unsigned>(offset
));
2826 template<int datasize
>
2827 ALWAYS_INLINE
bool tryStoreWithOffset(RegisterID rt
, RegisterID rn
, int32_t offset
)
2829 if (ARM64Assembler::canEncodeSImmOffset(offset
)) {
2830 storeUnscaledImmediate
<datasize
>(rt
, rn
, offset
);
2833 if (ARM64Assembler::canEncodePImmOffset
<datasize
>(offset
)) {
2834 storeUnsignedImmediate
<datasize
>(rt
, rn
, static_cast<unsigned>(offset
));
2840 template<int datasize
>
2841 ALWAYS_INLINE
bool tryStoreWithOffset(FPRegisterID rt
, RegisterID rn
, int32_t offset
)
2843 if (ARM64Assembler::canEncodeSImmOffset(offset
)) {
2844 m_assembler
.stur
<datasize
>(rt
, rn
, offset
);
2847 if (ARM64Assembler::canEncodePImmOffset
<datasize
>(offset
)) {
2848 m_assembler
.str
<datasize
>(rt
, rn
, static_cast<unsigned>(offset
));
2854 friend class LinkBuffer
;
2855 friend class RepatchBuffer
;
2857 static void linkCall(void* code
, Call call
, FunctionPtr function
)
2859 if (call
.isFlagSet(Call::Near
))
2860 ARM64Assembler::linkCall(code
, call
.m_label
, function
.value());
2862 ARM64Assembler::linkPointer(code
, call
.m_label
.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER
), function
.value());
2865 static void repatchCall(CodeLocationCall call
, CodeLocationLabel destination
)
2867 ARM64Assembler::repatchPointer(call
.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER
).dataLocation(), destination
.executableAddress());
2870 static void repatchCall(CodeLocationCall call
, FunctionPtr destination
)
2872 ARM64Assembler::repatchPointer(call
.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER
).dataLocation(), destination
.executableAddress());
2875 CachedTempRegister m_dataMemoryTempRegister
;
2876 CachedTempRegister m_cachedMemoryTempRegister
;
2877 bool m_makeJumpPatchable
;
2880 // Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
2882 ALWAYS_INLINE
void MacroAssemblerARM64::loadUnsignedImmediate
<8>(RegisterID rt
, RegisterID rn
, unsigned pimm
)
2884 m_assembler
.ldrb(rt
, rn
, pimm
);
2888 ALWAYS_INLINE
void MacroAssemblerARM64::loadUnsignedImmediate
<16>(RegisterID rt
, RegisterID rn
, unsigned pimm
)
2890 m_assembler
.ldrh(rt
, rn
, pimm
);
2894 ALWAYS_INLINE
void MacroAssemblerARM64::loadUnscaledImmediate
<8>(RegisterID rt
, RegisterID rn
, int simm
)
2896 m_assembler
.ldurb(rt
, rn
, simm
);
2900 ALWAYS_INLINE
void MacroAssemblerARM64::loadUnscaledImmediate
<16>(RegisterID rt
, RegisterID rn
, int simm
)
2902 m_assembler
.ldurh(rt
, rn
, simm
);
2906 ALWAYS_INLINE
void MacroAssemblerARM64::storeUnsignedImmediate
<8>(RegisterID rt
, RegisterID rn
, unsigned pimm
)
2908 m_assembler
.strb(rt
, rn
, pimm
);
2912 ALWAYS_INLINE
void MacroAssemblerARM64::storeUnsignedImmediate
<16>(RegisterID rt
, RegisterID rn
, unsigned pimm
)
2914 m_assembler
.strh(rt
, rn
, pimm
);
2918 ALWAYS_INLINE
void MacroAssemblerARM64::storeUnscaledImmediate
<8>(RegisterID rt
, RegisterID rn
, int simm
)
2920 m_assembler
.sturb(rt
, rn
, simm
);
2924 ALWAYS_INLINE
void MacroAssemblerARM64::storeUnscaledImmediate
<16>(RegisterID rt
, RegisterID rn
, int simm
)
2926 m_assembler
.sturh(rt
, rn
, simm
);
2931 #endif // ENABLE(ASSEMBLER)
2933 #endif // MacroAssemblerARM64_h