/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM64_h
#define MacroAssemblerARM64_h

#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/MathExtras.h>

class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    static const intptr_t maskHalfWord0 = 0xffffl;
    static const intptr_t maskHalfWord1 = 0xffff0000l;
    static const intptr_t maskUpperWord = 0xffffffff00000000l;

    // 4 instructions - 3 to load the function pointer, + blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;

public:
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }

    typedef ARM64Registers::FPRegisterID FPRegisterID;
    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
    int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }

    static const Scale ScalePtr = TimesEight;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
        return !(value & ~0x3ff8);
    }
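
    // A worked reading of the mask above (inferred from the bit pattern, not
    // stated elsewhere in this file): value & ~0x3ff8 is zero exactly when the
    // offset is a non-negative multiple of 8 no larger than 0x3ff8. E.g.:
    //   isCompactPtrAlignedAddressOffset(0x18)   -> true  (24, 8-byte aligned)
    //   isCompactPtrAlignedAddressOffset(0x3ff8) -> true  (largest accepted)
    //   isCompactPtrAlignedAddressOffset(0x1c)   -> false (not 8-byte aligned)
    //   isCompactPtrAlignedAddressOffset(0x4000) -> false (out of range)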

    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE
    };
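
    // How the "not the right flag" cases play out, assuming the AArch64 rule
    // that fcmp sets the V flag on an unordered (NaN) comparison - a summary;
    // see branchDouble() below for the actual fix-up:
    //   DoubleNotEqual (VC): NE alone would also fire on NaN, so the branch is
    //     split into "VS -> fall through" followed by "NE -> take the branch".
    //   DoubleEqualOrUnordered (VS): EQ alone would miss NaN, so the branch is
    //     split so that either "VS" or "EQ" reaches the taken path.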

    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID linkRegister = ARM64Registers::lr;

    // Integer operations:

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }
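
    // Illustrative encodings for the immediate selection above (register
    // choices are hypothetical; UInt12 covers 0..4095):
    //   add32(TrustedImm32(42), x0, x1)     -> ADD w1, w0, #42
    //   add32(TrustedImm32(-42), x0, x1)    -> SUB w1, w0, #42 (negated fits)
    //   add32(TrustedImm32(100000), x0, x1) -> materialize 100000 in the data
    //     temp register (ip0/x16), then a register-register ADD.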

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.add<64>(dest, dest, src);
    }

    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }
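
    // Which immediates encode as a LogicalImmediate is a property of the ARM64
    // bitmask-immediate format (a contiguous run of set bits, possibly rotated
    // and replicated); the examples here are for illustration only:
    //   and32(TrustedImm32(0xff), x0, x1)       -> AND w1, w0, #0xff (encodable)
    //   and32(TrustedImm32(0x7ffffff8), x0, x1) -> encodable (one run of bits)
    //   and32(TrustedImm32(0x12345678), x0, x1) -> not encodable; the value is
    //     materialized in the data temp register and a register AND is used.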

    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<32>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

            if (logicalImm.isValid()) {
                m_assembler.eor<32>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<32>(dest, src, dataTempRegister);
        }
    }

    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }

    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

            if (logicalImm.isValid()) {
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }

    // Memory access operations:

    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            m_cachedMemoryTempRegister.invalidate();
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, void* address)
    {
        move(ImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }
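
    // A minimal usage sketch for the conversion guard above (register choices
    // and the surrounding JIT plumbing are assumptions for illustration):
    //   JumpList failureCases;
    //   branchConvertDoubleToInt32(ARM64Registers::q0, ARM64Registers::x0, failureCases, fpTempRegister);
    //   ... fast path continues with the int32 in x0 ...
    //   failureCases.link(this); // slow path: value was NaN, out of range, or -0.0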

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump result = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }

    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check the low 32-bits sign extend to be equal to the full value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }
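
    // Worked example for the sign-extension check above (values illustrative):
    // truncating 2147483648.0 (2^31) gives 0x0000000080000000 in the 64-bit
    // temp; sign-extending its low 32 bits yields 0xffffffff80000000, which
    // compares not-equal, so the truncation is flagged as failed. Truncating
    // 42.0 gives 0x2a, which sign-extends to itself, so it passes.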

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dest.
        m_assembler.fcvtzs<64, 64>(dest, src);
        // Check the low 32-bits zero extend to be equal to the full value.
        m_assembler.cmp<64>(dest, dest, ARM64Assembler::UXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeDouble(FPRegisterID src, const void* address)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // Result is undefined if the value is outside of the integer range.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }

    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. These operations are not supported on
    // ARM64. Peek and poke operations read or write values on the
    // stack, without moving the current stack position. Additionally,
    // there are popToRestore and pushToSave operations, which are
    // designed just for quick-and-dirty saving and restoring of
    // temporary values. These operations don't claim to have any
    // ABI compatibility.
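
    // A minimal sketch of the intended pairing (illustrative only; each save
    // consumes a full 16 bytes so the stack pointer stays 16-byte aligned, as
    // AArch64 requires for sp-relative accesses):
    //   pushToSave(ARM64Registers::x0);   // str x0, [sp, #-16]!
    //   ... clobber x0 ...
    //   popToRestore(ARM64Registers::x0); // ldr x0, [sp], #16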

    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }

    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }

    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }

    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }

    // Register move operations:

    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }

    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }

    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allow forwards jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
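
    // A small usage sketch (register and value choices are hypothetical):
    //   Jump notTaken = branch32(LessThanOrEqual, ARM64Registers::x0, TrustedImm32(5));
    //   ... code executed when x0 > 5 ...
    //   notTaken.link(this); // branch target: x0 <= 5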

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<32>(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, left, memoryTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<64>(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        intptr_t immediate = right.m_value;
        if (isUInt12(immediate))
            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
        else if (isUInt12(-immediate))
            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, left, memoryTempRegister);
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, dataTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<32>(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<32>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<32>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<64>(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<64>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<64>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        move(ImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
        return branchTest32(cond, dataTempRegister, mask);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
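
    // For instance (illustrative), to add two int32s and bail out on signed
    // overflow one would write:
    //   Jump overflowed = branchAdd32(Overflow, ARM64Registers::x1, ARM64Registers::x0);
    //   ... continue with the sum in x0 ...
    //   overflowed.link(this); // slow path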

    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, op1, dataTempRegister, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, dest, src, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd32(cond, dest, imm, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
            store32(dataTempRegister, address.m_ptr);
        }

        return Jump(makeBranch(cond));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd64(cond, op1, dataTempRegister, dest);
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, dest, src, dest);
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, dest, imm, dest);
    }
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        if (cond != Overflow) {
            m_assembler.mul<32>(dest, src1, src2);
            return branchTest32(cond, dest);
        }

        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
        m_assembler.smull(dest, src1, src2);
        // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
        m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
        // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
        m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
        // After a mul32 the top 32 bits of the register should be clear.
        zeroExtend32ToPtr(dest, dest);
        // Check that bits 31..63 of the original result were all equal.
        return branch32(NotEqual, memoryTempRegister, dataTempRegister);
    }
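    // Worked example of the overflow check above (illustrative): for
    // src1 = src2 = 0x10000, smull produces 0x0000000100000000. Bits 63..32
    // are 1 while the splat of bit 31 is 0, so the final compare is NotEqual
    // and the overflow branch is taken. For src1 = src2 = 3 the product is 9:
    // bits 63..32 and the bit-31 splat are both 0, so no branch.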
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, dest, src, dest);
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchMul32(cond, dataTempRegister, src, dest);
    }
    Jump branchNeg32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub32(cond, op1, dataTempRegister, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }

    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub64(cond, op1, dataTempRegister, dest);
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, dest, src, dest);
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, dest, imm, dest);
    }


    // Jumps, calls, returns
    ALWAYS_INLINE Call call()
    {
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        invalidateAllTempRegisters();
        m_assembler.blr(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }
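    // Sketch of how such a call is bound later (illustrative; LinkBuffer
    // construction elided and targetFunction is a placeholder):
    //
    //     Call slowCall = jit.call();
    //     ...
    //     linkBuffer.link(slowCall, FunctionPtr(targetFunction));
    //
    // Linking rewrites the fixed-width movz/movk sequence emitted above with
    // the real callee address; see linkCall() near the end of this class.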
    ALWAYS_INLINE Call call(RegisterID target)
    {
        invalidateAllTempRegisters();
        m_assembler.blr(target);
        return Call(m_assembler.label(), Call::None);
    }

    ALWAYS_INLINE Call call(Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return call(dataTempRegister);
    }

    ALWAYS_INLINE Jump jump()
    {
        AssemblerLabel label = m_assembler.label();
        m_assembler.b();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
    }

    void jump(RegisterID target)
    {
        m_assembler.br(target);
    }

    void jump(Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
    }

    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
        load64(Address(dataTempRegister), dataTempRegister);
        m_assembler.br(dataTempRegister);
    }

    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }

    ALWAYS_INLINE Call nearCall()
    {
        m_assembler.bl();
        return Call(m_assembler.label(), Call::LinkableNear);
    }

    ALWAYS_INLINE void ret()
    {
        m_assembler.ret();
    }

    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }

    // Comparison operations
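    //
    // Each of these sets dest to 1 if the condition holds and to 0 otherwise
    // (via cset). For example (illustrative): compare32(LessThan, regA, regB,
    // regDest) leaves regDest == 1 exactly when the signed 32-bit value in
    // regA is less than the one in regB.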

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(dataTempRegister, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        move(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<64>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        move(right, getCachedDataTempRegisterIDAndInvalidate());
        compare32(cond, memoryTempRegister, dataTempRegister, dest);
    }

    void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<32>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        test32(cond, dataTempRegister, mask, dest);
    }

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, getCachedDataTempRegisterIDAndInvalidate());
        test32(cond, dataTempRegister, mask, dest);
    }

    void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.tst<64>(op1, op2);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<64>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // Patchable operations

    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
    {
        DataLabel32 label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }

    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }

    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }

    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }

    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, TrustedImm32(right));
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableJump()
    {
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
        store64(dataTempRegister, address);
        return label;
    }

    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }

    static void reemitInitialMoveWithPatch(void* address, void* value)
    {
        ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
    }

    // Miscellaneous operations:

    void breakpoint(uint16_t imm = 0)
    {
        m_assembler.brk(imm);
    }

    void nop()
    {
        m_assembler.nop();
    }

    // Misc helper functions.

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
    }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARM64Assembler::maxJumpReplacementSize();
    }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

private:
    ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
    {
        m_assembler.b_cond(cond);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
    }
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }

    template <int dataSize>
    ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
    {
        if (cond == IsZero)
            m_assembler.cbz<dataSize>(reg);
        else
            m_assembler.cbnz<dataSize>(reg);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
    }
    ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
    {
        ASSERT(bit < 64);
        bit &= 0x3f;
        if (cond == IsZero)
            m_assembler.tbz(reg, bit);
        else
            m_assembler.tbnz(reg, bit);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
    }
    ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
    ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }

    ALWAYS_INLINE bool isInIntRange(intptr_t value)
    {
        return value == ((value << 32) >> 32);
    }

    template<typename ImmediateType, typename rawType>
    void moveInternal(ImmediateType imm, RegisterID dest)
    {
        const int dataSize = sizeof(rawType) * 8;
        const int numberHalfWords = dataSize / 16;
        rawType value = bitwise_cast<rawType>(imm.m_value);
        uint16_t halfword[numberHalfWords];

        // Handle 0 and ~0 here to simplify code below
        if (!value) {
            m_assembler.movz<dataSize>(dest, 0);
            return;
        }
        if (!~value) {
            m_assembler.movn<dataSize>(dest, 0);
            return;
        }

        LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));

        if (logicalImm.isValid()) {
            m_assembler.movi<dataSize>(dest, logicalImm);
            return;
        }

        // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
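        // Illustrative example: 0xffffffff00001234 splits into halfwords
        // { 0x1234, 0x0000, 0xffff, 0xffff }. The two 0xffff halfwords outvote
        // the single zero halfword, so we start from all-ones with
        // movn(dest, ~0x1234) and movk only halfword 1 (0x0000): two
        // instructions instead of the three the movz path would need.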
        int zeroOrNegateVote = 0;
        for (int i = 0; i < numberHalfWords; ++i) {
            halfword[i] = getHalfword(value, i);
            if (!halfword[i])
                zeroOrNegateVote++;
            else if (halfword[i] == 0xffff)
                zeroOrNegateVote--;
        }

        bool needToClearRegister = true;
        if (zeroOrNegateVote >= 0) {
            for (int i = 0; i < numberHalfWords; i++) {
                if (halfword[i]) {
                    if (needToClearRegister) {
                        m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
                        needToClearRegister = false;
                    } else
                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
                }
            }
        } else {
            for (int i = 0; i < numberHalfWords; i++) {
                if (halfword[i] != 0xffff) {
                    if (needToClearRegister) {
                        m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
                        needToClearRegister = false;
                    } else
                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
                }
            }
        }
    }
    template<int datasize>
    ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        m_assembler.ldr<datasize>(rt, rn, pimm);
    }

    template<int datasize>
    ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
    {
        m_assembler.ldur<datasize>(rt, rn, simm);
    }

    template<int datasize>
    ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        m_assembler.str<datasize>(rt, rn, pimm);
    }

    template<int datasize>
    ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
    {
        m_assembler.stur<datasize>(rt, rn, simm);
    }
    void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
    {
        int32_t value = imm.m_value;
        m_assembler.movz<32>(dest, getHalfword(value, 0));
        m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
    }

    void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
    {
        intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
        m_assembler.movz<64>(dest, getHalfword(value, 0));
        m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
        m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
    }
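    // Illustrative example (assuming, as this sequence does, that pointers
    // fit in 48 bits): materializing 0x0000000123456789 emits
    //     movz dest, #0x6789
    //     movk dest, #0x2345, lsl #16
    //     movk dest, #0x0001, lsl #32
    // Always exactly three instructions, so repatching code can rely on the
    // instruction layout.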
    void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
    {
        if (value >= 0) {
            m_assembler.movz<32>(dest, getHalfword(value, 0));
            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
        } else {
            m_assembler.movn<32>(dest, ~getHalfword(value, 0));
            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
        }
    }

    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
    {
        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
    }
    template<int datasize>
    ALWAYS_INLINE void load(const void* address, RegisterID dest)
    {
        intptr_t currentRegisterContents;
        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
            intptr_t addressDelta = addressAsInt - currentRegisterContents;

            if (dest == memoryTempRegister)
                m_cachedMemoryTempRegister.invalidate();

            if (isInIntRange(addressDelta)) {
                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
                    m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
                    return;
                }

                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
                    m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
                    return;
                }
            }

            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
                m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
                return;
            }
        }

        move(TrustedImmPtr(address), memoryTempRegister);
        if (dest == memoryTempRegister)
            m_cachedMemoryTempRegister.invalidate();
        else
            m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
        m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
    }
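    // Illustrative note: because the last materialized address is cached in
    // memoryTempRegister, two successive absolute loads from nearby globals
    // (say &x, then &y at &x + 8) pay for a full address materialization only
    // once; the second load becomes a single ldur using the 8-byte delta.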
    template<int datasize>
    ALWAYS_INLINE void store(RegisterID src, const void* address)
    {
        intptr_t currentRegisterContents;
        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
            intptr_t addressDelta = addressAsInt - currentRegisterContents;

            if (isInIntRange(addressDelta)) {
                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
                    m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
                    return;
                }

                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
                    m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
                    return;
                }
            }

            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
                m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
                return;
            }
        }

        move(TrustedImmPtr(address), memoryTempRegister);
        m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
        m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
    }
    template <int dataSize>
    ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
    {
        intptr_t currentRegisterContents;
        if (dest.value(currentRegisterContents)) {
            if (currentRegisterContents == immediate)
                return true;

            LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));

            if (logicalImm.isValid()) {
                m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
                dest.setValue(immediate);
                return true;
            }

            if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
                if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);

                if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);

                dest.setValue(immediate);
                return true;
            }
        }

        return false;
    }
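    // Illustrative example: if the cached register already holds
    // 0x0000000100004000 and the requested immediate is 0x0000000100005000,
    // the upper word and halfword 1 both match, so a single movk of
    // halfword 0 (0x5000) updates the register instead of a full
    // rematerialization.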
    void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
            return;

        moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.m_value);
    }

    void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
            return;

        moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.asIntptr());
    }

    void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
            return;

        moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.m_value);
    }
    template<int datasize>
    ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            loadUnscaledImmediate<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    template<int datasize>
    ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            m_assembler.ldur<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    template<int datasize>
    ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            storeUnscaledImmediate<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    template<int datasize>
    ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            m_assembler.stur<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }
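    // Encoding note (ARM64): the unscaled forms (ldur/stur) accept a signed
    // 9-bit byte offset (-256..255), while the scaled forms (ldr/str) accept
    // an unsigned 12-bit offset in multiples of the access size. When neither
    // encoding fits, these helpers return false and the caller computes the
    // address into a temp register instead.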
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (call.isFlagSet(Call::Near))
            ARM64Assembler::linkCall(code, call.m_label, function.value());
        else
            ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }

    CachedTempRegister m_dataMemoryTempRegister;
    CachedTempRegister m_cachedMemoryTempRegister;
    bool m_makeJumpPatchable;
};

// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrb(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrh(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurb(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurh(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strb(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strh(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturb(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturh(rt, rn, simm);
}
} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerARM64_h