/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  26 #ifndef MacroAssemblerARM64_h 
  27 #define MacroAssemblerARM64_h 
  31 #include "ARM64Assembler.h" 
  32 #include "AbstractMacroAssembler.h" 
  33 #include <wtf/MathExtras.h> 
  37 class MacroAssemblerARM64 
: public AbstractMacroAssembler
<ARM64Assembler
> { 
  38     static const RegisterID dataTempRegister 
= ARM64Registers::ip0
; 
  39     static const RegisterID memoryTempRegister 
= ARM64Registers::ip1
; 
  40     static const ARM64Registers::FPRegisterID fpTempRegister 
= ARM64Registers::q31
; 
  41     static const ARM64Assembler::SetFlags S 
= ARM64Assembler::S
; 
  42     static const intptr_t maskHalfWord0 
= 0xffffl
; 
  43     static const intptr_t maskHalfWord1 
= 0xffff0000l
; 
  44     static const intptr_t maskUpperWord 
= 0xffffffff00000000l
; 
  46     // 4 instructions - 3 to load the function pointer, + blr. 
  47     static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER 
= -16; 
  51         : m_dataMemoryTempRegister(this, dataTempRegister
) 
  52         , m_cachedMemoryTempRegister(this, memoryTempRegister
) 
  53         , m_makeJumpPatchable(false) 
  57     typedef ARM64Registers::FPRegisterID FPRegisterID
; 
  58     typedef ARM64Assembler::LinkRecord LinkRecord
; 
  59     typedef ARM64Assembler::JumpType JumpType
; 
  60     typedef ARM64Assembler::JumpLinkType JumpLinkType
; 
  61     typedef ARM64Assembler::Condition Condition
; 
  63     static const ARM64Assembler::Condition DefaultCondition 
= ARM64Assembler::ConditionInvalid
; 
  64     static const ARM64Assembler::JumpType DefaultJump 
= ARM64Assembler::JumpNoConditionFixedSize
; 
  66     Vector
<LinkRecord
, 0, UnsafeVectorOverflow
>& jumpsToLink() { return m_assembler
.jumpsToLink(); } 
  67     void* unlinkedCode() { return m_assembler
.unlinkedCode(); } 
  68     bool canCompact(JumpType jumpType
) { return m_assembler
.canCompact(jumpType
); } 
  69     JumpLinkType 
computeJumpType(JumpType jumpType
, const uint8_t* from
, const uint8_t* to
) { return m_assembler
.computeJumpType(jumpType
, from
, to
); } 
  70     JumpLinkType 
computeJumpType(LinkRecord
& record
, const uint8_t* from
, const uint8_t* to
) { return m_assembler
.computeJumpType(record
, from
, to
); } 
  71     void recordLinkOffsets(int32_t regionStart
, int32_t regionEnd
, int32_t offset
) {return m_assembler
.recordLinkOffsets(regionStart
, regionEnd
, offset
); } 
  72     int jumpSizeDelta(JumpType jumpType
, JumpLinkType jumpLinkType
) { return m_assembler
.jumpSizeDelta(jumpType
, jumpLinkType
); } 
  73     void link(LinkRecord
& record
, uint8_t* from
, uint8_t* to
) { return m_assembler
.link(record
, from
, to
); } 
  74     int executableOffsetFor(int location
) { return m_assembler
.executableOffsetFor(location
); } 
  76     static const Scale ScalePtr 
= TimesEight
; 
  78     static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value
) 
  80         // This is the largest 32-bit access allowed, aligned to 64-bit boundary. 
  81         return !(value 
& ~0x3ff8); 
  84     enum RelationalCondition 
{ 
  85         Equal 
= ARM64Assembler::ConditionEQ
, 
  86         NotEqual 
= ARM64Assembler::ConditionNE
, 
  87         Above 
= ARM64Assembler::ConditionHI
, 
  88         AboveOrEqual 
= ARM64Assembler::ConditionHS
, 
  89         Below 
= ARM64Assembler::ConditionLO
, 
  90         BelowOrEqual 
= ARM64Assembler::ConditionLS
, 
  91         GreaterThan 
= ARM64Assembler::ConditionGT
, 
  92         GreaterThanOrEqual 
= ARM64Assembler::ConditionGE
, 
  93         LessThan 
= ARM64Assembler::ConditionLT
, 
  94         LessThanOrEqual 
= ARM64Assembler::ConditionLE
 
  97     enum ResultCondition 
{ 
  98         Overflow 
= ARM64Assembler::ConditionVS
, 
  99         Signed 
= ARM64Assembler::ConditionMI
, 
 100         PositiveOrZero 
= ARM64Assembler::ConditionPL
, 
 101         Zero 
= ARM64Assembler::ConditionEQ
, 
 102         NonZero 
= ARM64Assembler::ConditionNE
 
 106         IsZero 
= ARM64Assembler::ConditionEQ
, 
 107         IsNonZero 
= ARM64Assembler::ConditionNE
 
 110     enum DoubleCondition 
{ 
 111         // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. 
 112         DoubleEqual 
= ARM64Assembler::ConditionEQ
, 
 113         DoubleNotEqual 
= ARM64Assembler::ConditionVC
, // Not the right flag! check for this & handle differently. 
 114         DoubleGreaterThan 
= ARM64Assembler::ConditionGT
, 
 115         DoubleGreaterThanOrEqual 
= ARM64Assembler::ConditionGE
, 
 116         DoubleLessThan 
= ARM64Assembler::ConditionLO
, 
 117         DoubleLessThanOrEqual 
= ARM64Assembler::ConditionLS
, 
 118         // If either operand is NaN, these conditions always evaluate to true. 
 119         DoubleEqualOrUnordered 
= ARM64Assembler::ConditionVS
, // Not the right flag! check for this & handle differently. 
 120         DoubleNotEqualOrUnordered 
= ARM64Assembler::ConditionNE
, 
 121         DoubleGreaterThanOrUnordered 
= ARM64Assembler::ConditionHI
, 
 122         DoubleGreaterThanOrEqualOrUnordered 
= ARM64Assembler::ConditionHS
, 
 123         DoubleLessThanOrUnordered 
= ARM64Assembler::ConditionLT
, 
 124         DoubleLessThanOrEqualOrUnordered 
= ARM64Assembler::ConditionLE
, 
 127     static const RegisterID stackPointerRegister 
= ARM64Registers::sp
; 
 128     static const RegisterID linkRegister 
= ARM64Registers::lr
; 
 131     // Integer operations: 
 133     void add32(RegisterID src
, RegisterID dest
) 
 135         m_assembler
.add
<32>(dest
, dest
, src
); 
 138     void add32(TrustedImm32 imm
, RegisterID dest
) 
 140         add32(imm
, dest
, dest
); 
 143     void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
) 
 145         if (isUInt12(imm
.m_value
)) 
 146             m_assembler
.add
<32>(dest
, src
, UInt12(imm
.m_value
)); 
 147         else if (isUInt12(-imm
.m_value
)) 
 148             m_assembler
.sub
<32>(dest
, src
, UInt12(-imm
.m_value
)); 
 150             move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 151             m_assembler
.add
<32>(dest
, src
, dataTempRegister
); 
 155     void add32(TrustedImm32 imm
, Address address
) 
 157         load32(address
, getCachedDataTempRegisterIDAndInvalidate()); 
 159         if (isUInt12(imm
.m_value
)) 
 160             m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
)); 
 161         else if (isUInt12(-imm
.m_value
)) 
 162             m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
)); 
 164             move(imm
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 165             m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, memoryTempRegister
); 
 168         store32(dataTempRegister
, address
); 
 171     void add32(TrustedImm32 imm
, AbsoluteAddress address
) 
 173         load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
 175         if (isUInt12(imm
.m_value
)) { 
 176             m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
)); 
 177             store32(dataTempRegister
, address
.m_ptr
); 
 181         if (isUInt12(-imm
.m_value
)) { 
 182             m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
)); 
 183             store32(dataTempRegister
, address
.m_ptr
); 
 187         move(imm
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 188         m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, memoryTempRegister
); 
 189         store32(dataTempRegister
, address
.m_ptr
); 
 192     void add32(Address src
, RegisterID dest
) 
 194         load32(src
, getCachedDataTempRegisterIDAndInvalidate()); 
 195         add32(dataTempRegister
, dest
); 
 198     void add64(RegisterID src
, RegisterID dest
) 
 200         m_assembler
.add
<64>(dest
, dest
, src
); 
 203     void add64(TrustedImm32 imm
, RegisterID dest
) 
 205         if (isUInt12(imm
.m_value
)) { 
 206             m_assembler
.add
<64>(dest
, dest
, UInt12(imm
.m_value
)); 
 209         if (isUInt12(-imm
.m_value
)) { 
 210             m_assembler
.sub
<64>(dest
, dest
, UInt12(-imm
.m_value
)); 
 214         signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 215         m_assembler
.add
<64>(dest
, dest
, dataTempRegister
); 
 218     void add64(TrustedImm64 imm
, RegisterID dest
) 
 220         intptr_t immediate 
= imm
.m_value
; 
 222         if (isUInt12(immediate
)) { 
 223             m_assembler
.add
<64>(dest
, dest
, UInt12(static_cast<int32_t>(immediate
))); 
 226         if (isUInt12(-immediate
)) { 
 227             m_assembler
.sub
<64>(dest
, dest
, UInt12(static_cast<int32_t>(-immediate
))); 
 231         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 232         m_assembler
.add
<64>(dest
, dest
, dataTempRegister
); 
 235     void add64(TrustedImm32 imm
, RegisterID src
, RegisterID dest
) 
 237         if (isUInt12(imm
.m_value
)) { 
 238             m_assembler
.add
<64>(dest
, src
, UInt12(imm
.m_value
)); 
 241         if (isUInt12(-imm
.m_value
)) { 
 242             m_assembler
.sub
<64>(dest
, src
, UInt12(-imm
.m_value
)); 
 246         signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 247         m_assembler
.add
<64>(dest
, src
, dataTempRegister
); 
 250     void add64(TrustedImm32 imm
, Address address
) 
 252         load64(address
, getCachedDataTempRegisterIDAndInvalidate()); 
 254         if (isUInt12(imm
.m_value
)) 
 255             m_assembler
.add
<64>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
)); 
 256         else if (isUInt12(-imm
.m_value
)) 
 257             m_assembler
.sub
<64>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
)); 
 259             signExtend32ToPtr(imm
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 260             m_assembler
.add
<64>(dataTempRegister
, dataTempRegister
, memoryTempRegister
); 
 263         store64(dataTempRegister
, address
); 
 266     void add64(TrustedImm32 imm
, AbsoluteAddress address
) 
 268         load64(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
 270         if (isUInt12(imm
.m_value
)) { 
 271             m_assembler
.add
<64>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
)); 
 272             store64(dataTempRegister
, address
.m_ptr
); 
 276         if (isUInt12(-imm
.m_value
)) { 
 277             m_assembler
.sub
<64>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
)); 
 278             store64(dataTempRegister
, address
.m_ptr
); 
 282         signExtend32ToPtr(imm
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 283         m_assembler
.add
<64>(dataTempRegister
, dataTempRegister
, memoryTempRegister
); 
 284         store64(dataTempRegister
, address
.m_ptr
); 
 287     void add64(Address src
, RegisterID dest
) 
 289         load64(src
, getCachedDataTempRegisterIDAndInvalidate()); 
 290         m_assembler
.add
<64>(dest
, dest
, dataTempRegister
); 
 293     void add64(AbsoluteAddress src
, RegisterID dest
) 
 295         load64(src
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
 296         m_assembler
.add
<64>(dest
, dest
, dataTempRegister
); 
 299     void and32(RegisterID src
, RegisterID dest
) 
 301         and32(dest
, src
, dest
); 
 304     void and32(RegisterID op1
, RegisterID op2
, RegisterID dest
) 
 306         m_assembler
.and_
<32>(dest
, op1
, op2
); 
 309     void and32(TrustedImm32 imm
, RegisterID dest
) 
 311         and32(imm
, dest
, dest
); 
 314     void and32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
) 
 316         LogicalImmediate logicalImm 
= LogicalImmediate::create32(imm
.m_value
); 
 318         if (logicalImm
.isValid()) { 
 319             m_assembler
.and_
<32>(dest
, src
, logicalImm
); 
 323         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 324         m_assembler
.and_
<32>(dest
, src
, dataTempRegister
); 
 327     void and32(Address src
, RegisterID dest
) 
 329         load32(src
, dataTempRegister
); 
 330         and32(dataTempRegister
, dest
); 
 333     void and64(RegisterID src
, RegisterID dest
) 
 335         m_assembler
.and_
<64>(dest
, dest
, src
); 
 338     void and64(TrustedImm32 imm
, RegisterID dest
) 
 340         LogicalImmediate logicalImm 
= LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm
.m_value
))); 
 342         if (logicalImm
.isValid()) { 
 343             m_assembler
.and_
<64>(dest
, dest
, logicalImm
); 
 347         signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 348         m_assembler
.and_
<64>(dest
, dest
, dataTempRegister
); 
 351     void countLeadingZeros32(RegisterID src
, RegisterID dest
) 
 353         m_assembler
.clz
<32>(dest
, src
); 
 356     void lshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
) 
 358         m_assembler
.lsl
<32>(dest
, src
, shiftAmount
); 
 361     void lshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
) 
 363         m_assembler
.lsl
<32>(dest
, src
, imm
.m_value 
& 0x1f); 
 366     void lshift32(RegisterID shiftAmount
, RegisterID dest
) 
 368         lshift32(dest
, shiftAmount
, dest
); 
 371     void lshift32(TrustedImm32 imm
, RegisterID dest
) 
 373         lshift32(dest
, imm
, dest
); 
 376     void mul32(RegisterID src
, RegisterID dest
) 
 378         m_assembler
.mul
<32>(dest
, dest
, src
); 
 381     void mul32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
) 
 383         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 384         m_assembler
.mul
<32>(dest
, src
, dataTempRegister
); 
 387     void neg32(RegisterID dest
) 
 389         m_assembler
.neg
<32>(dest
, dest
); 
 392     void neg64(RegisterID dest
) 
 394         m_assembler
.neg
<64>(dest
, dest
); 
 397     void or32(RegisterID src
, RegisterID dest
) 
 399         or32(dest
, src
, dest
); 
 402     void or32(RegisterID op1
, RegisterID op2
, RegisterID dest
) 
 404         m_assembler
.orr
<32>(dest
, op1
, op2
); 
 407     void or32(TrustedImm32 imm
, RegisterID dest
) 
 409         or32(imm
, dest
, dest
); 
 412     void or32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
) 
 414         LogicalImmediate logicalImm 
= LogicalImmediate::create32(imm
.m_value
); 
 416         if (logicalImm
.isValid()) { 
 417             m_assembler
.orr
<32>(dest
, src
, logicalImm
); 
 421         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 422         m_assembler
.orr
<32>(dest
, src
, dataTempRegister
); 
 425     void or32(RegisterID src
, AbsoluteAddress address
) 
 427         load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
 428         m_assembler
.orr
<32>(dataTempRegister
, dataTempRegister
, src
); 
 429         store32(dataTempRegister
, address
.m_ptr
); 
 432     void or64(RegisterID src
, RegisterID dest
) 
 434         or64(dest
, src
, dest
); 
 437     void or64(RegisterID op1
, RegisterID op2
, RegisterID dest
) 
 439         m_assembler
.orr
<64>(dest
, op1
, op2
); 
 442     void or64(TrustedImm32 imm
, RegisterID dest
) 
 444         or64(imm
, dest
, dest
); 
 447     void or64(TrustedImm32 imm
, RegisterID src
, RegisterID dest
) 
 449         LogicalImmediate logicalImm 
= LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm
.m_value
))); 
 451         if (logicalImm
.isValid()) { 
 452             m_assembler
.orr
<64>(dest
, dest
, logicalImm
); 
 456         signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 457         m_assembler
.orr
<64>(dest
, src
, dataTempRegister
); 
 460     void or64(TrustedImm64 imm
, RegisterID dest
) 
 462         LogicalImmediate logicalImm 
= LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm
.m_value
))); 
 464         if (logicalImm
.isValid()) { 
 465             m_assembler
.orr
<64>(dest
, dest
, logicalImm
); 
 469         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 470         m_assembler
.orr
<64>(dest
, dest
, dataTempRegister
); 
 473     void rotateRight64(TrustedImm32 imm
, RegisterID srcDst
) 
 475         m_assembler
.ror
<64>(srcDst
, srcDst
, imm
.m_value 
& 63); 
 478     void rshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
) 
 480         m_assembler
.asr
<32>(dest
, src
, shiftAmount
); 
 483     void rshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
) 
 485         m_assembler
.asr
<32>(dest
, src
, imm
.m_value 
& 0x1f); 
 488     void rshift32(RegisterID shiftAmount
, RegisterID dest
) 
 490         rshift32(dest
, shiftAmount
, dest
); 
 493     void rshift32(TrustedImm32 imm
, RegisterID dest
) 
 495         rshift32(dest
, imm
, dest
); 
 498     void sub32(RegisterID src
, RegisterID dest
) 
 500         m_assembler
.sub
<32>(dest
, dest
, src
); 
 503     void sub32(TrustedImm32 imm
, RegisterID dest
) 
 505         if (isUInt12(imm
.m_value
)) { 
 506             m_assembler
.sub
<32>(dest
, dest
, UInt12(imm
.m_value
)); 
 509         if (isUInt12(-imm
.m_value
)) { 
 510             m_assembler
.add
<32>(dest
, dest
, UInt12(-imm
.m_value
)); 
 514         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 515         m_assembler
.sub
<32>(dest
, dest
, dataTempRegister
); 
 518     void sub32(TrustedImm32 imm
, Address address
) 
 520         load32(address
, getCachedDataTempRegisterIDAndInvalidate()); 
 522         if (isUInt12(imm
.m_value
)) 
 523             m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
)); 
 524         else if (isUInt12(-imm
.m_value
)) 
 525             m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
)); 
 527             move(imm
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 528             m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, memoryTempRegister
); 
 531         store32(dataTempRegister
, address
); 
 534     void sub32(TrustedImm32 imm
, AbsoluteAddress address
) 
 536         load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
 538         if (isUInt12(imm
.m_value
)) { 
 539             m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
)); 
 540             store32(dataTempRegister
, address
.m_ptr
); 
 544         if (isUInt12(-imm
.m_value
)) { 
 545             m_assembler
.add
<32>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
)); 
 546             store32(dataTempRegister
, address
.m_ptr
); 
 550         move(imm
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 551         m_assembler
.sub
<32>(dataTempRegister
, dataTempRegister
, memoryTempRegister
); 
 552         store32(dataTempRegister
, address
.m_ptr
); 
 555     void sub32(Address src
, RegisterID dest
) 
 557         load32(src
, getCachedDataTempRegisterIDAndInvalidate()); 
 558         sub32(dataTempRegister
, dest
); 
 561     void sub64(RegisterID src
, RegisterID dest
) 
 563         m_assembler
.sub
<64>(dest
, dest
, src
); 
 566     void sub64(TrustedImm32 imm
, RegisterID dest
) 
 568         if (isUInt12(imm
.m_value
)) { 
 569             m_assembler
.sub
<64>(dest
, dest
, UInt12(imm
.m_value
)); 
 572         if (isUInt12(-imm
.m_value
)) { 
 573             m_assembler
.add
<64>(dest
, dest
, UInt12(-imm
.m_value
)); 
 577         signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 578         m_assembler
.sub
<64>(dest
, dest
, dataTempRegister
); 
 581     void sub64(TrustedImm64 imm
, RegisterID dest
) 
 583         intptr_t immediate 
= imm
.m_value
; 
 585         if (isUInt12(immediate
)) { 
 586             m_assembler
.sub
<64>(dest
, dest
, UInt12(static_cast<int32_t>(immediate
))); 
 589         if (isUInt12(-immediate
)) { 
 590             m_assembler
.add
<64>(dest
, dest
, UInt12(static_cast<int32_t>(-immediate
))); 
 594         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 595         m_assembler
.sub
<64>(dest
, dest
, dataTempRegister
); 
 598     void urshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
) 
 600         m_assembler
.lsr
<32>(dest
, src
, shiftAmount
); 
 603     void urshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
) 
 605         m_assembler
.lsr
<32>(dest
, src
, imm
.m_value 
& 0x1f); 
 608     void urshift32(RegisterID shiftAmount
, RegisterID dest
) 
 610         urshift32(dest
, shiftAmount
, dest
); 
 613     void urshift32(TrustedImm32 imm
, RegisterID dest
) 
 615         urshift32(dest
, imm
, dest
); 
 618     void xor32(RegisterID src
, RegisterID dest
) 
 620         xor32(dest
, src
, dest
); 
 623     void xor32(RegisterID op1
, RegisterID op2
, RegisterID dest
) 
 625         m_assembler
.eor
<32>(dest
, op1
, op2
); 
 628     void xor32(TrustedImm32 imm
, RegisterID dest
) 
 630         xor32(imm
, dest
, dest
); 
 633     void xor32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
) 
 635         if (imm
.m_value 
== -1) 
 636             m_assembler
.mvn
<32>(dest
, src
); 
 638             LogicalImmediate logicalImm 
= LogicalImmediate::create32(imm
.m_value
); 
 640             if (logicalImm
.isValid()) { 
 641                 m_assembler
.eor
<32>(dest
, dest
, logicalImm
); 
 645             move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 646             m_assembler
.eor
<32>(dest
, src
, dataTempRegister
); 
 650     void xor64(RegisterID src
, Address address
) 
 652         load64(address
, getCachedDataTempRegisterIDAndInvalidate()); 
 653         m_assembler
.eor
<64>(dataTempRegister
, dataTempRegister
, src
); 
 654         store64(dataTempRegister
, address
); 
 657     void xor64(RegisterID src
, RegisterID dest
) 
 659         xor64(dest
, src
, dest
); 
 662     void xor64(RegisterID op1
, RegisterID op2
, RegisterID dest
) 
 664         m_assembler
.eor
<64>(dest
, op1
, op2
); 
 667     void xor64(TrustedImm32 imm
, RegisterID dest
) 
 669         xor64(imm
, dest
, dest
); 
 672     void xor64(TrustedImm32 imm
, RegisterID src
, RegisterID dest
) 
 674         if (imm
.m_value 
== -1) 
 675             m_assembler
.mvn
<64>(dest
, src
); 
 677             LogicalImmediate logicalImm 
= LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm
.m_value
))); 
 679             if (logicalImm
.isValid()) { 
 680                 m_assembler
.eor
<64>(dest
, dest
, logicalImm
); 
 684             signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
 685             m_assembler
.eor
<64>(dest
, src
, dataTempRegister
); 
 690     // Memory access operations: 
 692     void load64(ImplicitAddress address
, RegisterID dest
) 
 694         if (tryLoadWithOffset
<64>(dest
, address
.base
, address
.offset
)) 
 697         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 698         m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
); 
 701     void load64(BaseIndex address
, RegisterID dest
) 
 703         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 3)) { 
 704             m_assembler
.ldr
<64>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 708         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 709         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 710         m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
); 
 713     void load64(const void* address
, RegisterID dest
) 
 715         load
<64>(address
, dest
); 
 718     DataLabel32 
load64WithAddressOffsetPatch(Address address
, RegisterID dest
) 
 720         DataLabel32 
label(this); 
 721         signExtend32ToPtrWithFixedWidth(address
.offset
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 722         m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
, ARM64Assembler::SXTW
, 0); 
 726     DataLabelCompact 
load64WithCompactAddressOffsetPatch(Address address
, RegisterID dest
) 
 728         ASSERT(isCompactPtrAlignedAddressOffset(address
.offset
)); 
 729         DataLabelCompact 
label(this); 
 730         m_assembler
.ldr
<64>(dest
, address
.base
, address
.offset
); 
 734     ConvertibleLoadLabel 
convertibleLoadPtr(Address address
, RegisterID dest
) 
 736         ConvertibleLoadLabel 
result(this); 
 737         ASSERT(!(address
.offset 
& ~0xff8)); 
 738         m_assembler
.ldr
<64>(dest
, address
.base
, address
.offset
); 
 742     void load32(ImplicitAddress address
, RegisterID dest
) 
 744         if (tryLoadWithOffset
<32>(dest
, address
.base
, address
.offset
)) 
 747         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 748         m_assembler
.ldr
<32>(dest
, address
.base
, memoryTempRegister
); 
 751     void load32(BaseIndex address
, RegisterID dest
) 
 753         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 2)) { 
 754             m_assembler
.ldr
<32>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 758         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 759         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 760         m_assembler
.ldr
<32>(dest
, address
.base
, memoryTempRegister
); 
 763     void load32(const void* address
, RegisterID dest
) 
 765         load
<32>(address
, dest
); 
 768     DataLabel32 
load32WithAddressOffsetPatch(Address address
, RegisterID dest
) 
 770         DataLabel32 
label(this); 
 771         signExtend32ToPtrWithFixedWidth(address
.offset
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 772         m_assembler
.ldr
<32>(dest
, address
.base
, memoryTempRegister
, ARM64Assembler::SXTW
, 0); 
 776     DataLabelCompact 
load32WithCompactAddressOffsetPatch(Address address
, RegisterID dest
) 
 778         ASSERT(isCompactPtrAlignedAddressOffset(address
.offset
)); 
 779         DataLabelCompact 
label(this); 
 780         m_assembler
.ldr
<32>(dest
, address
.base
, address
.offset
); 
 784     void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
) 
 786         load32(address
, dest
); 
 789     void load16(ImplicitAddress address
, RegisterID dest
) 
 791         if (tryLoadWithOffset
<16>(dest
, address
.base
, address
.offset
)) 
 794         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 795         m_assembler
.ldrh(dest
, address
.base
, memoryTempRegister
); 
 798     void load16(BaseIndex address
, RegisterID dest
) 
 800         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 1)) { 
 801             m_assembler
.ldrh(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 805         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 806         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 807         m_assembler
.ldrh(dest
, address
.base
, memoryTempRegister
); 
 810     void load16Unaligned(BaseIndex address
, RegisterID dest
) 
 812         load16(address
, dest
); 
 815     void load16Signed(BaseIndex address
, RegisterID dest
) 
 817         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 1)) { 
 818             m_assembler
.ldrsh
<64>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 822         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 823         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 824         m_assembler
.ldrsh
<64>(dest
, address
.base
, memoryTempRegister
); 
 827     void load8(ImplicitAddress address
, RegisterID dest
) 
 829         if (tryLoadWithOffset
<8>(dest
, address
.base
, address
.offset
)) 
 832         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 833         m_assembler
.ldrb(dest
, address
.base
, memoryTempRegister
); 
 836     void load8(BaseIndex address
, RegisterID dest
) 
 838         if (!address
.offset 
&& !address
.scale
) { 
 839             m_assembler
.ldrb(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 843         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 844         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 845         m_assembler
.ldrb(dest
, address
.base
, memoryTempRegister
); 
 848     void load8(const void* address
, RegisterID dest
) 
 850         moveToCachedReg(TrustedImmPtr(address
), m_cachedMemoryTempRegister
); 
 851         m_assembler
.ldrb(dest
, memoryTempRegister
, ARM64Registers::zr
); 
 854     void load8Signed(BaseIndex address
, RegisterID dest
) 
 856         if (!address
.offset 
&& !address
.scale
) { 
 857             m_assembler
.ldrsb
<64>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 861         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 862         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 863         m_assembler
.ldrsb
<64>(dest
, address
.base
, memoryTempRegister
); 
 866     void store64(RegisterID src
, ImplicitAddress address
) 
 868         if (tryStoreWithOffset
<64>(src
, address
.base
, address
.offset
)) 
 871         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 872         m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
); 
 875     void store64(RegisterID src
, BaseIndex address
) 
 877         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 3)) { 
 878             m_assembler
.str
<64>(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 882         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 883         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 884         m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
); 
 887     void store64(RegisterID src
, const void* address
) 
 889         store
<64>(src
, address
); 
 892     void store64(TrustedImm64 imm
, ImplicitAddress address
) 
 895             store64(ARM64Registers::zr
, address
); 
 899         moveToCachedReg(imm
, m_dataMemoryTempRegister
); 
 900         store64(dataTempRegister
, address
); 
 903     void store64(TrustedImm64 imm
, BaseIndex address
) 
 906             store64(ARM64Registers::zr
, address
); 
 910         moveToCachedReg(imm
, m_dataMemoryTempRegister
); 
 911         store64(dataTempRegister
, address
); 
 914     DataLabel32 
store64WithAddressOffsetPatch(RegisterID src
, Address address
) 
 916         DataLabel32 
label(this); 
 917         signExtend32ToPtrWithFixedWidth(address
.offset
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 918         m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
, ARM64Assembler::SXTW
, 0); 
 922     void store32(RegisterID src
, ImplicitAddress address
) 
 924         if (tryStoreWithOffset
<32>(src
, address
.base
, address
.offset
)) 
 927         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 928         m_assembler
.str
<32>(src
, address
.base
, memoryTempRegister
); 
 931     void store32(RegisterID src
, BaseIndex address
) 
 933         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 2)) { 
 934             m_assembler
.str
<32>(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 938         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 939         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 940         m_assembler
.str
<32>(src
, address
.base
, memoryTempRegister
); 
 943     void store32(RegisterID src
, const void* address
) 
 945         store
<32>(src
, address
); 
 948     void store32(TrustedImm32 imm
, ImplicitAddress address
) 
 951             store32(ARM64Registers::zr
, address
); 
 955         moveToCachedReg(imm
, m_dataMemoryTempRegister
); 
 956         store32(dataTempRegister
, address
); 
 959     void store32(TrustedImm32 imm
, BaseIndex address
) 
 962             store32(ARM64Registers::zr
, address
); 
 966         moveToCachedReg(imm
, m_dataMemoryTempRegister
); 
 967         store32(dataTempRegister
, address
); 
 970     void store32(TrustedImm32 imm
, const void* address
) 
 973             store32(ARM64Registers::zr
, address
); 
 977         moveToCachedReg(imm
, m_dataMemoryTempRegister
); 
 978         store32(dataTempRegister
, address
); 
 981     DataLabel32 
store32WithAddressOffsetPatch(RegisterID src
, Address address
) 
 983         DataLabel32 
label(this); 
 984         signExtend32ToPtrWithFixedWidth(address
.offset
, getCachedMemoryTempRegisterIDAndInvalidate()); 
 985         m_assembler
.str
<32>(src
, address
.base
, memoryTempRegister
, ARM64Assembler::SXTW
, 0); 
 989     void store16(RegisterID src
, BaseIndex address
) 
 991         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 1)) { 
 992             m_assembler
.strh(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 996         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
 997         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
 998         m_assembler
.strh(src
, address
.base
, memoryTempRegister
); 
1001     void store8(RegisterID src
, BaseIndex address
) 
1003         if (!address
.offset 
&& !address
.scale
) { 
1004             m_assembler
.strb(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1008         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
1009         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1010         m_assembler
.strb(src
, address
.base
, memoryTempRegister
); 
1013     void store8(RegisterID src
, void* address
) 
1015         move(ImmPtr(address
), getCachedMemoryTempRegisterIDAndInvalidate()); 
1016         m_assembler
.strb(src
, memoryTempRegister
, 0); 
1019     void store8(TrustedImm32 imm
, void* address
) 
1022             store8(ARM64Registers::zr
, address
); 
1026         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
1027         store8(dataTempRegister
, address
); 
1031     // Floating-point operations: 
1033     static bool supportsFloatingPoint() { return true; } 
1034     static bool supportsFloatingPointTruncate() { return true; } 
1035     static bool supportsFloatingPointSqrt() { return true; } 
1036     static bool supportsFloatingPointAbs() { return true; } 
1038     enum BranchTruncateType 
{ BranchIfTruncateFailed
, BranchIfTruncateSuccessful 
}; 
1040     void absDouble(FPRegisterID src
, FPRegisterID dest
) 
1042         m_assembler
.fabs
<64>(dest
, src
); 
1045     void addDouble(FPRegisterID src
, FPRegisterID dest
) 
1047         addDouble(dest
, src
, dest
); 
1050     void addDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
) 
1052         m_assembler
.fadd
<64>(dest
, op1
, op2
); 
1055     void addDouble(Address src
, FPRegisterID dest
) 
1057         loadDouble(src
, fpTempRegister
); 
1058         addDouble(fpTempRegister
, dest
); 
1061     void addDouble(AbsoluteAddress address
, FPRegisterID dest
) 
1063         loadDouble(address
.m_ptr
, fpTempRegister
); 
1064         addDouble(fpTempRegister
, dest
); 
1067     void ceilDouble(FPRegisterID src
, FPRegisterID dest
) 
1069         m_assembler
.frintp
<64>(dest
, src
); 
1072     void floorDouble(FPRegisterID src
, FPRegisterID dest
) 
1074         m_assembler
.frintm
<64>(dest
, src
); 
1077     // Convert 'src' to an integer, and places the resulting 'dest'. 
1078     // If the result is not representable as a 32 bit value, branch. 
1079     // May also branch for some values that are representable in 32 bits 
1080     // (specifically, in this case, 0). 
1081     void branchConvertDoubleToInt32(FPRegisterID src
, RegisterID dest
, JumpList
& failureCases
, FPRegisterID
, bool negZeroCheck 
= true) 
1083         m_assembler
.fcvtns
<32, 64>(dest
, src
); 
1085         // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump. 
1086         m_assembler
.scvtf
<64, 32>(fpTempRegister
, dest
); 
1087         failureCases
.append(branchDouble(DoubleNotEqualOrUnordered
, src
, fpTempRegister
)); 
1089         // If the result is zero, it might have been -0.0, and the double comparison won't catch this! 
1091             failureCases
.append(branchTest32(Zero
, dest
)); 
1094     Jump 
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
) 
1096         m_assembler
.fcmp
<64>(left
, right
); 
1098         if (cond 
== DoubleNotEqual
) { 
1099             // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump. 
1100             Jump unordered 
= makeBranch(ARM64Assembler::ConditionVS
); 
1101             Jump result 
= makeBranch(ARM64Assembler::ConditionNE
); 
1102             unordered
.link(this); 
1105         if (cond 
== DoubleEqualOrUnordered
) { 
1106             Jump unordered 
= makeBranch(ARM64Assembler::ConditionVS
); 
1107             Jump notEqual 
= makeBranch(ARM64Assembler::ConditionNE
); 
1108             unordered
.link(this); 
1109             // We get here if either unordered or equal. 
1110             Jump result 
= jump(); 
1111             notEqual
.link(this); 
1114         return makeBranch(cond
); 
1117     Jump 
branchDoubleNonZero(FPRegisterID reg
, FPRegisterID
) 
1119         m_assembler
.fcmp_0
<64>(reg
); 
1120         Jump unordered 
= makeBranch(ARM64Assembler::ConditionVS
); 
1121         Jump result 
= makeBranch(ARM64Assembler::ConditionNE
); 
1122         unordered
.link(this); 
1126     Jump 
branchDoubleZeroOrNaN(FPRegisterID reg
, FPRegisterID
) 
1128         m_assembler
.fcmp_0
<64>(reg
); 
1129         Jump unordered 
= makeBranch(ARM64Assembler::ConditionVS
); 
1130         Jump notEqual 
= makeBranch(ARM64Assembler::ConditionNE
); 
1131         unordered
.link(this); 
1132         // We get here if either unordered or equal. 
1133         Jump result 
= jump(); 
1134         notEqual
.link(this); 
1138     Jump 
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
, BranchTruncateType branchType 
= BranchIfTruncateFailed
) 
1140         // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest. 
1141         m_assembler
.fcvtzs
<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src
); 
1142         zeroExtend32ToPtr(dataTempRegister
, dest
); 
1143         // Check thlow 32-bits sign extend to be equal to the full value. 
1144         m_assembler
.cmp
<64>(dataTempRegister
, dataTempRegister
, ARM64Assembler::SXTW
, 0); 
1145         return Jump(makeBranch(branchType 
== BranchIfTruncateSuccessful 
? Equal 
: NotEqual
)); 
1148     Jump 
branchTruncateDoubleToUint32(FPRegisterID src
, RegisterID dest
, BranchTruncateType branchType 
= BranchIfTruncateFailed
) 
1150         // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest. 
1151         m_assembler
.fcvtzs
<64, 64>(dest
, src
); 
1152         // Check thlow 32-bits zero extend to be equal to the full value. 
1153         m_assembler
.cmp
<64>(dest
, dest
, ARM64Assembler::UXTW
, 0); 
1154         return Jump(makeBranch(branchType 
== BranchIfTruncateSuccessful 
? Equal 
: NotEqual
)); 
1157     void convertDoubleToFloat(FPRegisterID src
, FPRegisterID dest
) 
1159         m_assembler
.fcvt
<32, 64>(dest
, src
); 
1162     void convertFloatToDouble(FPRegisterID src
, FPRegisterID dest
) 
1164         m_assembler
.fcvt
<64, 32>(dest
, src
); 
1167     void convertInt32ToDouble(TrustedImm32 imm
, FPRegisterID dest
) 
1169         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
1170         convertInt32ToDouble(dataTempRegister
, dest
); 
1173     void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
) 
1175         m_assembler
.scvtf
<64, 32>(dest
, src
); 
1178     void convertInt32ToDouble(Address address
, FPRegisterID dest
) 
1180         load32(address
, getCachedDataTempRegisterIDAndInvalidate()); 
1181         convertInt32ToDouble(dataTempRegister
, dest
); 
1184     void convertInt32ToDouble(AbsoluteAddress address
, FPRegisterID dest
) 
1186         load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
1187         convertInt32ToDouble(dataTempRegister
, dest
); 
1190     void divDouble(FPRegisterID src
, FPRegisterID dest
) 
1192         divDouble(dest
, src
, dest
); 
1195     void divDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
) 
1197         m_assembler
.fdiv
<64>(dest
, op1
, op2
); 
1200     void loadDouble(ImplicitAddress address
, FPRegisterID dest
) 
1202         if (tryLoadWithOffset
<64>(dest
, address
.base
, address
.offset
)) 
1205         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
1206         m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
); 
1209     void loadDouble(BaseIndex address
, FPRegisterID dest
) 
1211         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 3)) { 
1212             m_assembler
.ldr
<64>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1216         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
1217         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1218         m_assembler
.ldr
<64>(dest
, address
.base
, memoryTempRegister
); 
1221     void loadDouble(const void* address
, FPRegisterID dest
) 
1223         moveToCachedReg(TrustedImmPtr(address
), m_cachedMemoryTempRegister
); 
1224         m_assembler
.ldr
<64>(dest
, memoryTempRegister
, ARM64Registers::zr
); 
1227     void loadFloat(BaseIndex address
, FPRegisterID dest
) 
1229         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 2)) { 
1230             m_assembler
.ldr
<32>(dest
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1234         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
1235         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1236         m_assembler
.ldr
<32>(dest
, address
.base
, memoryTempRegister
); 
1239     void moveDouble(FPRegisterID src
, FPRegisterID dest
) 
1241         m_assembler
.fmov
<64>(dest
, src
); 
1244     void moveDoubleTo64(FPRegisterID src
, RegisterID dest
) 
1246         m_assembler
.fmov
<64>(dest
, src
); 
1249     void move64ToDouble(RegisterID src
, FPRegisterID dest
) 
1251         m_assembler
.fmov
<64>(dest
, src
); 
1254     void mulDouble(FPRegisterID src
, FPRegisterID dest
) 
1256         mulDouble(dest
, src
, dest
); 
1259     void mulDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
) 
1261         m_assembler
.fmul
<64>(dest
, op1
, op2
); 
1264     void mulDouble(Address src
, FPRegisterID dest
) 
1266         loadDouble(src
, fpTempRegister
); 
1267         mulDouble(fpTempRegister
, dest
); 
1270     void negateDouble(FPRegisterID src
, FPRegisterID dest
) 
1272         m_assembler
.fneg
<64>(dest
, src
); 
1275     void sqrtDouble(FPRegisterID src
, FPRegisterID dest
) 
1277         m_assembler
.fsqrt
<64>(dest
, src
); 
1280     void storeDouble(FPRegisterID src
, ImplicitAddress address
) 
1282         if (tryStoreWithOffset
<64>(src
, address
.base
, address
.offset
)) 
1285         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
1286         m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
); 
1289     void storeDouble(FPRegisterID src
, const void* address
) 
1291         moveToCachedReg(TrustedImmPtr(address
), m_cachedMemoryTempRegister
); 
1292         m_assembler
.str
<64>(src
, memoryTempRegister
, ARM64Registers::zr
); 
1295     void storeDouble(FPRegisterID src
, BaseIndex address
) 
1297         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 3)) { 
1298             m_assembler
.str
<64>(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1302         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
1303         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1304         m_assembler
.str
<64>(src
, address
.base
, memoryTempRegister
); 
1307     void storeFloat(FPRegisterID src
, BaseIndex address
) 
1309         if (!address
.offset 
&& (!address
.scale 
|| address
.scale 
== 2)) { 
1310             m_assembler
.str
<32>(src
, address
.base
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1314         signExtend32ToPtr(TrustedImm32(address
.offset
), getCachedMemoryTempRegisterIDAndInvalidate()); 
1315         m_assembler
.add
<64>(memoryTempRegister
, memoryTempRegister
, address
.index
, ARM64Assembler::UXTX
, address
.scale
); 
1316         m_assembler
.str
<32>(src
, address
.base
, memoryTempRegister
); 
1319     void subDouble(FPRegisterID src
, FPRegisterID dest
) 
1321         subDouble(dest
, src
, dest
); 
1324     void subDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
) 
1326         m_assembler
.fsub
<64>(dest
, op1
, op2
); 
1329     void subDouble(Address src
, FPRegisterID dest
) 
1331         loadDouble(src
, fpTempRegister
); 
1332         subDouble(fpTempRegister
, dest
); 
1335     // Result is undefined if the value is outside of the integer range. 
1336     void truncateDoubleToInt32(FPRegisterID src
, RegisterID dest
) 
1338         m_assembler
.fcvtzs
<32, 64>(dest
, src
); 
1341     void truncateDoubleToUint32(FPRegisterID src
, RegisterID dest
) 
1343         m_assembler
.fcvtzu
<32, 64>(dest
, src
); 
1347     // Stack manipulation operations: 
1349     // The ABI is assumed to provide a stack abstraction to memory, 
1350     // containing machine word sized units of data.  Push and pop 
1351     // operations add and remove a single register sized unit of data 
1352     // to or from the stack.  These operations are not supported on 
1353     // ARM64.  Peek and poke operations read or write values on the 
1354     // stack, without moving the current stack position.  Additionally, 
1355     // there are popToRestore and pushToSave operations, which are 
1356     // designed just for quick-and-dirty saving and restoring of 
1357     // temporary values.  These operations don't claim to have any 
1358     // ABI compatibility. 
1360     void pop(RegisterID
) NO_RETURN_DUE_TO_CRASH
 
1365     void push(RegisterID
) NO_RETURN_DUE_TO_CRASH
 
1370     void push(Address
) NO_RETURN_DUE_TO_CRASH
 
1375     void push(TrustedImm32
) NO_RETURN_DUE_TO_CRASH
 
1380     void popToRestore(RegisterID dest
) 
1382         m_assembler
.ldr
<64>(dest
, ARM64Registers::sp
, PostIndex(16)); 
1385     void pushToSave(RegisterID src
) 
1387         m_assembler
.str
<64>(src
, ARM64Registers::sp
, PreIndex(-16)); 
1390     void pushToSave(Address address
) 
1392         load32(address
, getCachedDataTempRegisterIDAndInvalidate()); 
1393         pushToSave(dataTempRegister
); 
1396     void pushToSave(TrustedImm32 imm
) 
1398         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
1399         pushToSave(dataTempRegister
); 
1402     void popToRestore(FPRegisterID dest
) 
1404         loadDouble(stackPointerRegister
, dest
); 
1405         add64(TrustedImm32(16), stackPointerRegister
); 
1408     void pushToSave(FPRegisterID src
) 
1410         sub64(TrustedImm32(16), stackPointerRegister
); 
1411         storeDouble(src
, stackPointerRegister
); 
1415     // Register move operations: 
1417     void move(RegisterID src
, RegisterID dest
) 
1420             m_assembler
.mov
<64>(dest
, src
); 
1423     void move(TrustedImm32 imm
, RegisterID dest
) 
1425         moveInternal
<TrustedImm32
, int32_t>(imm
, dest
); 
1428     void move(TrustedImmPtr imm
, RegisterID dest
) 
1430         moveInternal
<TrustedImmPtr
, intptr_t>(imm
, dest
); 
1433     void move(TrustedImm64 imm
, RegisterID dest
) 
1435         moveInternal
<TrustedImm64
, int64_t>(imm
, dest
); 
1438     void swap(RegisterID reg1
, RegisterID reg2
) 
1440         move(reg1
, getCachedDataTempRegisterIDAndInvalidate()); 
1442         move(dataTempRegister
, reg2
); 
1445     void signExtend32ToPtr(RegisterID src
, RegisterID dest
) 
1447         m_assembler
.sxtw(dest
, src
); 
1450     void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
) 
1452         m_assembler
.uxtw(dest
, src
); 
1456     // Forwards / external control flow operations: 
1458     // This set of jump and conditional branch operations return a Jump 
1459     // object which may linked at a later point, allow forwards jump, 
1460     // or jumps that will require external linkage (after the code has been 
1463     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge 
1464     // respecitvely, for unsigned comparisons the names b, a, be, and ae are 
1465     // used (representing the names 'below' and 'above'). 
1467     // Operands to the comparision are provided in the expected order, e.g. 
1468     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when 
1469     // treated as a signed 32bit value, is less than or equal to 5. 
1471     // jz and jnz test whether the first operand is equal to zero, and take 
1472     // an optional second operand of a mask under which to perform the test. 
1474     Jump 
branch32(RelationalCondition cond
, RegisterID left
, RegisterID right
) 
1476         m_assembler
.cmp
<32>(left
, right
); 
1477         return Jump(makeBranch(cond
)); 
1480     Jump 
branch32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
) 
1482         if (isUInt12(right
.m_value
)) 
1483             m_assembler
.cmp
<32>(left
, UInt12(right
.m_value
)); 
1484         else if (isUInt12(-right
.m_value
)) 
1485             m_assembler
.cmn
<32>(left
, UInt12(-right
.m_value
)); 
1487             moveToCachedReg(right
, m_dataMemoryTempRegister
); 
1488             m_assembler
.cmp
<32>(left
, dataTempRegister
); 
1490         return Jump(makeBranch(cond
)); 
1493     Jump 
branch32(RelationalCondition cond
, RegisterID left
, Address right
) 
1495         load32(right
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1496         return branch32(cond
, left
, memoryTempRegister
); 
1499     Jump 
branch32(RelationalCondition cond
, Address left
, RegisterID right
) 
1501         load32(left
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1502         return branch32(cond
, memoryTempRegister
, right
); 
1505     Jump 
branch32(RelationalCondition cond
, Address left
, TrustedImm32 right
) 
1507         load32(left
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1508         return branch32(cond
, memoryTempRegister
, right
); 
1511     Jump 
branch32(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
) 
1513         load32(left
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1514         return branch32(cond
, memoryTempRegister
, right
); 
1517     Jump 
branch32(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
) 
1519         load32(left
.m_ptr
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1520         return branch32(cond
, memoryTempRegister
, right
); 
1523     Jump 
branch32(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
) 
1525         load32(left
.m_ptr
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1526         return branch32(cond
, memoryTempRegister
, right
); 
1529     Jump 
branch64(RelationalCondition cond
, RegisterID left
, RegisterID right
) 
1531         m_assembler
.cmp
<64>(left
, right
); 
1532         return Jump(makeBranch(cond
)); 
1535     Jump 
branch64(RelationalCondition cond
, RegisterID left
, TrustedImm64 right
) 
1537         intptr_t immediate 
= right
.m_value
; 
1538         if (isUInt12(immediate
)) 
1539             m_assembler
.cmp
<64>(left
, UInt12(static_cast<int32_t>(immediate
))); 
1540         else if (isUInt12(-immediate
)) 
1541             m_assembler
.cmn
<64>(left
, UInt12(static_cast<int32_t>(-immediate
))); 
1543             moveToCachedReg(right
, m_dataMemoryTempRegister
); 
1544             m_assembler
.cmp
<64>(left
, dataTempRegister
); 
1546         return Jump(makeBranch(cond
)); 
1549     Jump 
branch64(RelationalCondition cond
, RegisterID left
, Address right
) 
1551         load64(right
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1552         return branch64(cond
, left
, memoryTempRegister
); 
1555     Jump 
branch64(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
) 
1557         load64(left
.m_ptr
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1558         return branch64(cond
, memoryTempRegister
, right
); 
1561     Jump 
branch64(RelationalCondition cond
, Address left
, RegisterID right
) 
1563         load64(left
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1564         return branch64(cond
, memoryTempRegister
, right
); 
1567     Jump 
branch64(RelationalCondition cond
, Address left
, TrustedImm64 right
) 
1569         load64(left
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1570         return branch64(cond
, memoryTempRegister
, right
); 
1573     Jump 
branch8(RelationalCondition cond
, Address left
, TrustedImm32 right
) 
1575         ASSERT(!(0xffffff00 & right
.m_value
)); 
1576         load8(left
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1577         return branch32(cond
, memoryTempRegister
, right
); 
1580     Jump 
branch8(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
) 
1582         ASSERT(!(0xffffff00 & right
.m_value
)); 
1583         load8(left
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1584         return branch32(cond
, memoryTempRegister
, right
); 
1587     Jump 
branchTest32(ResultCondition cond
, RegisterID reg
, RegisterID mask
) 
1589         m_assembler
.tst
<32>(reg
, mask
); 
1590         return Jump(makeBranch(cond
)); 
1593     Jump 
branchTest32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1595         if (mask
.m_value 
== -1) { 
1596             if ((cond 
== Zero
) || (cond 
== NonZero
)) 
1597                 return Jump(makeCompareAndBranch
<32>(static_cast<ZeroCondition
>(cond
), reg
)); 
1598             m_assembler
.tst
<32>(reg
, reg
); 
1599         } else if (hasOneBitSet(mask
.m_value
) && ((cond 
== Zero
) || (cond 
== NonZero
))) 
1600             return Jump(makeTestBitAndBranch(reg
, getLSBSet(mask
.m_value
), static_cast<ZeroCondition
>(cond
))); 
1602             if ((cond 
== Zero
) || (cond 
== NonZero
)) { 
1603                 LogicalImmediate logicalImm 
= LogicalImmediate::create32(mask
.m_value
); 
1605                 if (logicalImm
.isValid()) { 
1606                     m_assembler
.tst
<32>(reg
, logicalImm
); 
1607                     return Jump(makeBranch(cond
)); 
1611             move(mask
, getCachedDataTempRegisterIDAndInvalidate()); 
1612             m_assembler
.tst
<32>(reg
, dataTempRegister
); 
1614         return Jump(makeBranch(cond
)); 
1617     Jump 
branchTest32(ResultCondition cond
, Address address
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1619         load32(address
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1620         return branchTest32(cond
, memoryTempRegister
, mask
); 
1623     Jump 
branchTest32(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1625         load32(address
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1626         return branchTest32(cond
, memoryTempRegister
, mask
); 
1629     Jump 
branchTest64(ResultCondition cond
, RegisterID reg
, RegisterID mask
) 
1631         m_assembler
.tst
<64>(reg
, mask
); 
1632         return Jump(makeBranch(cond
)); 
1635     Jump 
branchTest64(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1637         if (mask
.m_value 
== -1) { 
1638             if ((cond 
== Zero
) || (cond 
== NonZero
)) 
1639                 return Jump(makeCompareAndBranch
<64>(static_cast<ZeroCondition
>(cond
), reg
)); 
1640             m_assembler
.tst
<64>(reg
, reg
); 
1641         } else if (hasOneBitSet(mask
.m_value
) && ((cond 
== Zero
) || (cond 
== NonZero
))) 
1642             return Jump(makeTestBitAndBranch(reg
, getLSBSet(mask
.m_value
), static_cast<ZeroCondition
>(cond
))); 
1644             if ((cond 
== Zero
) || (cond 
== NonZero
)) { 
1645                 LogicalImmediate logicalImm 
= LogicalImmediate::create64(mask
.m_value
); 
1647                 if (logicalImm
.isValid()) { 
1648                     m_assembler
.tst
<64>(reg
, logicalImm
); 
1649                     return Jump(makeBranch(cond
)); 
1653             signExtend32ToPtr(mask
, getCachedDataTempRegisterIDAndInvalidate()); 
1654             m_assembler
.tst
<64>(reg
, dataTempRegister
); 
1656         return Jump(makeBranch(cond
)); 
1659     Jump 
branchTest64(ResultCondition cond
, Address address
, RegisterID mask
) 
1661         load64(address
, getCachedDataTempRegisterIDAndInvalidate()); 
1662         return branchTest64(cond
, dataTempRegister
, mask
); 
1665     Jump 
branchTest64(ResultCondition cond
, Address address
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1667         load64(address
, getCachedDataTempRegisterIDAndInvalidate()); 
1668         return branchTest64(cond
, dataTempRegister
, mask
); 
1671     Jump 
branchTest64(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1673         load64(address
, getCachedDataTempRegisterIDAndInvalidate()); 
1674         return branchTest64(cond
, dataTempRegister
, mask
); 
1677     Jump 
branchTest64(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1679         load64(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
1680         return branchTest64(cond
, dataTempRegister
, mask
); 
1683     Jump 
branchTest8(ResultCondition cond
, Address address
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1685         load8(address
, getCachedDataTempRegisterIDAndInvalidate()); 
1686         return branchTest32(cond
, dataTempRegister
, mask
); 
1689     Jump 
branchTest8(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1691         load8(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
1692         return branchTest32(cond
, dataTempRegister
, mask
); 
1695     Jump 
branchTest8(ResultCondition cond
, ExtendedAddress address
, TrustedImm32 mask 
= TrustedImm32(-1)) 
1697         move(ImmPtr(reinterpret_cast<void*>(address
.offset
)), getCachedDataTempRegisterIDAndInvalidate()); 
1698         m_assembler
.ldrb(dataTempRegister
, address
.base
, dataTempRegister
); 
1699         return branchTest32(cond
, dataTempRegister
, mask
); 
1702     Jump 
branch32WithUnalignedHalfWords(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
) 
1704         return branch32(cond
, left
, right
); 
1708     // Arithmetic control flow operations: 
1710     // This set of conditional branch operations branch based 
1711     // on the result of an arithmetic operation.  The operation 
1712     // is performed as normal, storing the result. 
1714     // * jz operations branch if the result is zero. 
1715     // * jo operations branch if the (signed) arithmetic 
1716     //   operation caused an overflow to occur. 
1718     Jump 
branchAdd32(ResultCondition cond
, RegisterID op1
, RegisterID op2
, RegisterID dest
) 
1720         m_assembler
.add
<32, S
>(dest
, op1
, op2
); 
1721         return Jump(makeBranch(cond
)); 
1724     Jump 
branchAdd32(ResultCondition cond
, RegisterID op1
, TrustedImm32 imm
, RegisterID dest
) 
1726         if (isUInt12(imm
.m_value
)) { 
1727             m_assembler
.add
<32, S
>(dest
, op1
, UInt12(imm
.m_value
)); 
1728             return Jump(makeBranch(cond
)); 
1730         if (isUInt12(-imm
.m_value
)) { 
1731             m_assembler
.sub
<32, S
>(dest
, op1
, UInt12(-imm
.m_value
)); 
1732             return Jump(makeBranch(cond
)); 
1735         signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
1736         return branchAdd32(cond
, op1
, dataTempRegister
, dest
); 
1739     Jump 
branchAdd32(ResultCondition cond
, RegisterID src
, RegisterID dest
) 
1741         return branchAdd32(cond
, dest
, src
, dest
); 
1744     Jump 
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
) 
1746         return branchAdd32(cond
, dest
, imm
, dest
); 
1749     Jump 
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, AbsoluteAddress address
) 
1751         load32(address
.m_ptr
, getCachedDataTempRegisterIDAndInvalidate()); 
1753         if (isUInt12(imm
.m_value
)) { 
1754             m_assembler
.add
<32, S
>(dataTempRegister
, dataTempRegister
, UInt12(imm
.m_value
)); 
1755             store32(dataTempRegister
, address
.m_ptr
); 
1756         } else if (isUInt12(-imm
.m_value
)) { 
1757             m_assembler
.sub
<32, S
>(dataTempRegister
, dataTempRegister
, UInt12(-imm
.m_value
)); 
1758             store32(dataTempRegister
, address
.m_ptr
); 
1760             move(imm
, getCachedMemoryTempRegisterIDAndInvalidate()); 
1761             m_assembler
.add
<32, S
>(dataTempRegister
, dataTempRegister
, memoryTempRegister
); 
1762             store32(dataTempRegister
, address
.m_ptr
); 
1765         return Jump(makeBranch(cond
)); 
    // dest = op1 + op2 (64-bit, flag-setting), then branch on 'cond'.
    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
1774     Jump 
branchAdd64(ResultCondition cond
, RegisterID op1
, TrustedImm32 imm
, RegisterID dest
) 
1776         if (isUInt12(imm
.m_value
)) { 
1777             m_assembler
.add
<64, S
>(dest
, op1
, UInt12(imm
.m_value
)); 
1778             return Jump(makeBranch(cond
)); 
1780         if (isUInt12(-imm
.m_value
)) { 
1781             m_assembler
.sub
<64, S
>(dest
, op1
, UInt12(-imm
.m_value
)); 
1782             return Jump(makeBranch(cond
)); 
1785         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
1786         return branchAdd64(cond
, op1
, dataTempRegister
, dest
); 
    // Two-operand convenience form: dest += src (64-bit), branch on 'cond'.
    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, dest, src, dest);
    }

    // Immediate convenience form: dest += imm (64-bit), branch on 'cond'.
    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, dest, imm, dest);
    }
    // dest = src1 * src2 (32-bit), then branch on 'cond'. Signed is not
    // supported. For Overflow, a widening smull is used and the high bits are
    // compared against a sign-splat of bit 31 to detect 32-bit overflow.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        if (cond != Overflow) {
            m_assembler.mul<32>(dest, src1, src2);
            return branchTest32(cond, dest);
        }

        // This is a signed multiple of two 32-bit values, producing a 64-bit result.
        m_assembler.smull(dest, src1, src2);
        // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
        m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
        // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
        m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
        // After a mul32 the top 32 bits of the register should be clear.
        zeroExtend32ToPtr(dest, dest);
        // Check that bits 31..63 of the original result were all equal.
        return branch32(NotEqual, memoryTempRegister, dataTempRegister);
    }
    // Two-operand convenience form: dest *= src, branch on 'cond'.
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, dest, src, dest);
    }

    // Immediate form: dest = imm * src, branch on 'cond'. The immediate is
    // materialized into dataTempRegister first.
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchMul32(cond, dataTempRegister, src, dest);
    }
    // dest = -dest (32-bit, flag-setting), then branch on 'cond'.
    Jump branchNeg32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }

    // One-operand branchSub32: identical emission to branchNeg32 —
    // dest = 0 - dest, flag-setting, branch on 'cond'.
    Jump branchSub32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }
    // dest = op1 - op2 (32-bit, flag-setting), then branch on 'cond'.
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
1849     Jump 
branchSub32(ResultCondition cond
, RegisterID op1
, TrustedImm32 imm
, RegisterID dest
) 
1851         if (isUInt12(imm
.m_value
)) { 
1852             m_assembler
.sub
<32, S
>(dest
, op1
, UInt12(imm
.m_value
)); 
1853             return Jump(makeBranch(cond
)); 
1855         if (isUInt12(-imm
.m_value
)) { 
1856             m_assembler
.add
<32, S
>(dest
, op1
, UInt12(-imm
.m_value
)); 
1857             return Jump(makeBranch(cond
)); 
1860         signExtend32ToPtr(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
1861         return branchSub32(cond
, op1
, dataTempRegister
, dest
); 
    // Two-operand convenience form: dest -= src, branch on 'cond'.
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }

    // Immediate convenience form: dest -= imm, branch on 'cond'.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }
    // dest = op1 - op2 (64-bit, flag-setting), then branch on 'cond'.
    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
1880     Jump 
branchSub64(ResultCondition cond
, RegisterID op1
, TrustedImm32 imm
, RegisterID dest
) 
1882         if (isUInt12(imm
.m_value
)) { 
1883             m_assembler
.sub
<64, S
>(dest
, op1
, UInt12(imm
.m_value
)); 
1884             return Jump(makeBranch(cond
)); 
1886         if (isUInt12(-imm
.m_value
)) { 
1887             m_assembler
.add
<64, S
>(dest
, op1
, UInt12(-imm
.m_value
)); 
1888             return Jump(makeBranch(cond
)); 
1891         move(imm
, getCachedDataTempRegisterIDAndInvalidate()); 
1892         return branchSub64(cond
, op1
, dataTempRegister
, dest
); 
    // Two-operand convenience form: dest -= src (64-bit), branch on 'cond'.
    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, dest, src, dest);
    }

    // Immediate convenience form: dest -= imm (64-bit), branch on 'cond'.
    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, dest, imm, dest);
    }
    // Jumps, calls, returns

    // Linkable call: emits a fixed-width pointer move into dataTempRegister
    // followed by blr, so the pointer can be repatched later. The ASSERT pins
    // the pointer-move's offset relative to the call label.
    ALWAYS_INLINE Call call()
    {
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        invalidateAllTempRegisters();
        m_assembler.blr(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }
    // Indirect call through 'target'. Temp-register caches are invalidated
    // because the callee may clobber them.
    ALWAYS_INLINE Call call(RegisterID target)
    {
        invalidateAllTempRegisters();
        m_assembler.blr(target);
        return Call(m_assembler.label(), Call::None);
    }

    // Call through a function pointer loaded from memory.
    ALWAYS_INLINE Call call(Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return call(dataTempRegister);
    }
    // Unconditional jump; fixed-size variant is used while emitting patchable code.
    ALWAYS_INLINE Jump jump()
    {
        AssemblerLabel label = m_assembler.label();
        m_assembler.b();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
    }

    // Indirect jump through a register.
    void jump(RegisterID target)
    {
        m_assembler.br(target);
    }

    // Indirect jump through a pointer loaded from 'address'.
    void jump(Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
    }

    // Indirect jump through a pointer stored at an absolute address.
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
        load64(Address(dataTempRegister), dataTempRegister);
        m_assembler.br(dataTempRegister);
    }
    // Links 'oldJump' to the current location, then emits a tail call.
    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }

    // Near call: a single bl, linked later.
    ALWAYS_INLINE Call nearCall()
    {
        m_assembler.bl();
        return Call(m_assembler.label(), Call::LinkableNear);
    }

    ALWAYS_INLINE void ret()
    {
        m_assembler.ret();
    }

    // Like a normal call, but don't link: fixed-width pointer move + br, so
    // the target pointer remains repatchable.
    ALWAYS_INLINE Call tailRecursiveCall()
    {
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }
    // Comparisons operations

    // dest = (left <cond> right) ? 1 : 0, via cmp + cset.
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // Memory operand form: loads 'left' into dataTempRegister first.
    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(dataTempRegister, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // Immediate form: the immediate is always materialized into
    // dataTempRegister (no cmp-immediate fast path here).
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        move(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // 64-bit compare; result is still a 32-bit 0/1 in dest.
    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // 64-bit compare against a sign-extended 32-bit immediate.
    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<64>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
    // Byte compare: loads the byte and the immediate into the two temp
    // registers and defers to the register/register compare32.
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        move(right, getCachedDataTempRegisterIDAndInvalidate());
        compare32(cond, memoryTempRegister, dataTempRegister, dest);
    }
    // dest = ((src & mask) <cond>) ? 1 : 0. mask == -1 means "test src
    // against itself" (i.e. test all bits without materializing the mask).
    void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<32>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // Memory operand form of test32.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        test32(cond, dataTempRegister, mask, dest);
    }

    // Byte form: loads one byte then defers to test32.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, getCachedDataTempRegisterIDAndInvalidate());
        test32(cond, dataTempRegister, mask, dest);
    }

    // 64-bit test of two registers; result is a 32-bit 0/1.
    void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.tst<64>(op1, op2);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // 64-bit test against a sign-extended 32-bit mask; -1 tests src with itself.
    void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<64>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
    // Patchable operations

    // Emits a fixed-width immediate move and returns a label so the value can
    // be repatched later.
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
    {
        DataLabel32 label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }

    // Pointer-sized variant of the patchable move.
    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }
    // Compare 'left' against a repatchable pointer constant (materialized into
    // dataTempRegister) and branch; 'dataLabel' reports the patch location.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }

    // Memory-operand form of the patchable pointer compare-and-branch.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }
    // Patchable pointer compare. NOTE(review): this compares only the low
    // 32 bits of 'right' (branch32 with TrustedImm32(right)) — presumably the
    // callers only pass values that fit in 32 bits; confirm before relying on it.
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        // m_makeJumpPatchable forces the fixed-size jump encodings so the
        // branch can be repatched after linking.
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, TrustedImm32(right));
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    // Patchable form of branchTest32.
    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    // Patchable form of branch32 with an immediate.
    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    // Patchable form of branchPtrWithPatch.
    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    // Patchable unconditional jump.
    PatchableJump patchableJump()
    {
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
    // Stores a repatchable pointer constant to 'address'; the returned label
    // marks the fixed-width move that holds the pointer.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
        store64(dataTempRegister, address);
        return label;
    }

    // Convenience form storing a null initial pointer.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }

    // Rewrites the fixed-width pointer move at 'address' to load 'value' into
    // dataTempRegister (the 'true' flag is forwarded to setPointer).
    static void reemitInitialMoveWithPatch(void* address, void* value)
    {
        ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
    }
    // Miscellaneous operations:

    // Emits a brk instruction with the given immediate (default 0).
    void breakpoint(uint16_t imm = 0)
    {
        m_assembler.brk(imm);
    }
    // Misc helper functions.

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
    }

    // Reads back the pointer currently encoded in a linked call site.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
    }

    // Overwrites the instruction(s) at 'instructionStart' with a jump to 'destination'.
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARM64Assembler::maxJumpReplacementSize();
    }

    // Address-form patchable-branch jump replacement is not supported on this port.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
    // The patchable sequence starts at the data label itself (offset 0).
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }

    // Unsupported on this port (see canJumpReplacePatchableBranchPtrWithPatch).
    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    // Restores the original pointer-move at a jump-replaced branchPtrWithPatch site.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
    }

    // Unsupported on this port.
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
    // Emits a conditional branch: b_cond, then a label and a nop that reserve
    // space for the jump to be rewritten at link/patch time.
    ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
    {
        m_assembler.b_cond(cond);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
    }

    // Condition-type adapters onto the ARM64 condition form above.
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
    // Emits cbz/cbnz on 'reg', followed by a label and nop reserving space for
    // link-time rewriting.
    template <int dataSize>
    ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
    {
        if (cond == IsZero)
            m_assembler.cbz<dataSize>(reg);
        else
            m_assembler.cbnz<dataSize>(reg);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
    }
    // Emits tbz/tbnz testing a single bit of 'reg', followed by a label and
    // nop reserving space for link-time rewriting.
    ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
    {
        ASSERT(bit < 64);
        if (cond == IsZero)
            m_assembler.tbz(reg, bit);
        else
            m_assembler.tbnz(reg, bit);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
    }
    // The MacroAssembler condition enums mirror ARM64Assembler::Condition
    // values, so conversion is a plain static_cast.
    ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }

    // Returns the temp register's ID and marks its cached value as invalid
    // (callers are about to clobber it).
    ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
    ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
2272     ALWAYS_INLINE 
bool isInIntRange(intptr_t value
) 
2274         return value 
== ((value 
<< 32) >> 32); 
    // Materializes an arbitrary immediate into 'dest' using the cheapest
    // available encoding: movz/movn for 0/~0, a single logical-immediate movi
    // when the bit pattern allows, otherwise a movz/movn + movk sequence that
    // skips halfwords already covered by the initial instruction.
    template<typename ImmediateType, typename rawType>
    void moveInternal(ImmediateType imm, RegisterID dest)
    {
        const int dataSize = sizeof(rawType)*8;
        const int numberHalfWords = dataSize/16;
        rawType value = bitwise_cast<rawType>(imm.m_value);
        uint16_t halfword[numberHalfWords];

        // Handle 0 and ~0 here to simplify code below
        if (!value) {
            m_assembler.movz<dataSize>(dest, 0);
            return;
        }
        if (value == -1) {
            m_assembler.movn<dataSize>(dest, 0);
            return;
        }

        LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));

        if (logicalImm.isValid()) {
            m_assembler.movi<dataSize>(dest, logicalImm);
            return;
        }

        // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
        int zeroOrNegateVote = 0;
        for (int i = 0; i < numberHalfWords; ++i) {
            halfword[i] = getHalfword(value, i);
            if (!halfword[i])
                zeroOrNegateVote++;
            else if (halfword[i] == 0xffff)
                zeroOrNegateVote--;
        }

        bool needToClearRegister = true;
        if (zeroOrNegateVote >= 0) {
            // Mostly-zero value: movz the first non-zero halfword, movk the rest.
            for (int i = 0; i < numberHalfWords; i++) {
                if (halfword[i]) {
                    if (needToClearRegister) {
                        m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
                        needToClearRegister = false;
                    } else
                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
                }
            }
        } else {
            // Mostly-ones value: movn the complement of the first non-0xffff
            // halfword, movk the rest.
            for (int i = 0; i < numberHalfWords; i++) {
                if (halfword[i] != 0xffff) {
                    if (needToClearRegister) {
                        m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
                        needToClearRegister = false;
                    } else
                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
                }
            }
        }
    }
    // ldr with a scaled, unsigned 12-bit immediate offset.
    template<int datasize>
    ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        m_assembler.ldr<datasize>(rt, rn, pimm);
    }

    // ldur with an unscaled, signed 9-bit immediate offset.
    template<int datasize>
    ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
    {
        m_assembler.ldur<datasize>(rt, rn, simm);
    }

    // str with a scaled, unsigned 12-bit immediate offset.
    template<int datasize>
    ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        m_assembler.str<datasize>(rt, rn, pimm);
    }

    // stur with an unscaled, signed 9-bit immediate offset.
    template<int datasize>
    ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
    {
        m_assembler.stur<datasize>(rt, rn, simm);
    }
    // Always emits movz + movk (two instructions) regardless of value, so the
    // site can be repatched with any 32-bit constant.
    void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
    {
        int32_t value = imm.m_value;
        m_assembler.movz<32>(dest, getHalfword(value, 0));
        m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
    }

    // Fixed three-instruction pointer move (movz + 2x movk), covering the low
    // 48 bits — enough for the pointers this port repatches.
    void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
    {
        intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
        m_assembler.movz<64>(dest, getHalfword(value, 0));
        m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
        m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
    }
    // Fixed two-instruction sign-extending move: movz/movk for non-negative
    // values, movn/movk for negative ones.
    void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
    {
        if (value >= 0) {
            m_assembler.movz<32>(dest, getHalfword(value, 0));
            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
        } else {
            m_assembler.movn<32>(dest, ~getHalfword(value, 0));
            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
        }
    }

    // Sign-extends a 32-bit immediate into a 64-bit register via a pointer move.
    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
    {
        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
    }
    // Loads from an absolute address, reusing memoryTempRegister's cached
    // contents when possible: first as a base plus an encodable delta, then by
    // patching just the low halfword, else by fully rematerializing the address.
    template<int datasize>
    ALWAYS_INLINE void load(const void* address, RegisterID dest)
    {
        intptr_t currentRegisterContents;
        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
            intptr_t addressDelta = addressAsInt - currentRegisterContents;

            if (isInIntRange(addressDelta)) {
                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
                    m_assembler.ldur<datasize>(dest,  memoryTempRegister, addressDelta);
                    return;
                }
                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
                    m_assembler.ldr<datasize>(dest,  memoryTempRegister, addressDelta);
                    return;
                }
            }
            // Same upper 48 bits: only the low halfword needs updating.
            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
                m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
                return;
            }
        }

        // Slow path: materialize the full address.
        move(TrustedImmPtr(address), memoryTempRegister);
        m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
        m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
    }
    // Stores to an absolute address; mirrors load() above, reusing
    // memoryTempRegister's cached contents when possible.
    template<int datasize>
    ALWAYS_INLINE void store(RegisterID src, const void* address)
    {
        intptr_t currentRegisterContents;
        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
            intptr_t addressDelta = addressAsInt - currentRegisterContents;

            if (isInIntRange(addressDelta)) {
                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
                    m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
                    return;
                }
                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
                    m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
                    return;
                }
            }
            // Same upper 48 bits: only the low halfword needs updating.
            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
                m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
                return;
            }
        }

        // Slow path: materialize the full address.
        move(TrustedImmPtr(address), memoryTempRegister);
        m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
        m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
    }
    // Tries to materialize 'immediate' into a cached temp register more
    // cheaply than a full move: reuse as-is, a single logical-immediate movi,
    // or movk'ing just the differing low halfwords when the upper word matches.
    // Returns false if the caller must fall back to a full move.
    template <int dataSize>
    ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
    {
        intptr_t currentRegisterContents;
        if (dest.value(currentRegisterContents)) {
            if (currentRegisterContents == immediate)
                return true;

            LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));

            if (logicalImm.isValid()) {
                m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
                dest.setValue(immediate);
                return true;
            }

            // Upper 32 bits match: patch only the differing low halfwords.
            if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
                if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);

                if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);

                dest.setValue(immediate);
                return true;
            }
        }

        return false;
    }
    // Moves a 32-bit immediate into a cached temp register, preferring the
    // cheap cache-aware path, and records the new cached value.
    void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
            return;

        moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.m_value);
    }

    // Pointer variant of the cache-aware move.
    void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
            return;

        moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.asIntptr());
    }

    // 64-bit immediate variant of the cache-aware move.
    void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
    {
        if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
            return;

        moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
        dest.setValue(imm.m_value);
    }
    // Emits a load if 'offset' is encodable (unscaled signed 9-bit or scaled
    // unsigned 12-bit); returns false when the caller must compute the address.
    template<int datasize>
    ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            loadUnscaledImmediate<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    // Floating-point register variant of tryLoadWithOffset.
    template<int datasize>
    ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            m_assembler.ldur<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    // Store counterpart of tryLoadWithOffset.
    template<int datasize>
    ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            storeUnscaledImmediate<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }

    // Floating-point register variant of tryStoreWithOffset.
    template<int datasize>
    ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
    {
        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
            m_assembler.stur<datasize>(rt, rn, offset);
            return true;
        }
        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
            m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
            return true;
        }
        return false;
    }
    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Links a call: near calls patch the bl directly; far calls patch the
    // fixed-width pointer move preceding the blr.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (call.isFlagSet(Call::Near))
            ARM64Assembler::linkCall(code, call.m_label, function.value());
        else
            ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
    }

    // Repatches the pointer constant of an already-linked (far) call.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }

    // Cached temp registers (value tracking for ip0/ip1) and the flag forcing
    // fixed-size jump encodings while emitting patchable sequences.
    CachedTempRegister m_dataMemoryTempRegister;
    CachedTempRegister m_cachedMemoryTempRegister;
    bool m_makeJumpPatchable;
};
// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes

// 8/16-bit accesses use the dedicated byte/halfword instructions
// (ldrb/ldrh, ldurb/ldurh, strb/strh, sturb/sturh).
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrb(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrh(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurb(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurh(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strb(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strh(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturb(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturh(rt, rn, simm);
}
2648 #endif // ENABLE(ASSEMBLER) 
2650 #endif // MacroAssemblerARM64_h