/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AssemblyHelpers_h
#define AssemblyHelpers_h

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "JITCode.h"
#include "MacroAssembler.h"
#include "TypeofType.h"
#include "VM.h"

namespace JSC {

typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);

class AssemblyHelpers : public MacroAssembler {
public:
    AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
        : m_vm(vm)
        , m_codeBlock(codeBlock)
        , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
    {
        if (m_codeBlock) {
            ASSERT(m_baselineCodeBlock);
            ASSERT(!m_baselineCodeBlock->alternative());
            ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
        }
    }

    CodeBlock* codeBlock() { return m_codeBlock; }
    VM* vm() { return m_vm; }
    AssemblerType_T& assembler() { return m_assembler; }

    void checkStackPointerAlignment()
    {
        // This check is both unneeded and harder to write correctly for ARM64
#if !defined(NDEBUG) && !CPU(ARM64)
        Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf));
        abortWithReason(AHStackPointerMisaligned);
        stackPointerAligned.link(this);
#endif
    }
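
    // The store/load helpers below come in two flavors: with USE(JSVALUE64) a JSValue is one
    // 64-bit word, so a single store64/load64 moves the whole value; with USE(JSVALUE32_64) the
    // 32-bit tag and payload halves are moved separately via TagOffset/PayloadOffset.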
    template<typename T>
    void storeCell(T cell, Address address)
    {
#if USE(JSVALUE64)
        store64(cell, address);
#else
        store32(cell, address.withOffset(PayloadOffset));
        store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset));
#endif
    }

    void storeValue(JSValueRegs regs, Address address)
    {
#if USE(JSVALUE64)
        store64(regs.gpr(), address);
#else
        store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
        store32(regs.tagGPR(), address.withOffset(TagOffset));
#endif
    }

    void storeValue(JSValueRegs regs, BaseIndex address)
    {
#if USE(JSVALUE64)
        store64(regs.gpr(), address);
#else
        store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
        store32(regs.tagGPR(), address.withOffset(TagOffset));
#endif
    }

    void storeValue(JSValueRegs regs, void* address)
    {
#if USE(JSVALUE64)
        store64(regs.gpr(), address);
#else
        store32(regs.payloadGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset));
        store32(regs.tagGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset));
#endif
    }

    void loadValue(Address address, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        load64(address, regs.gpr());
#else
        if (address.base == regs.payloadGPR()) {
            load32(address.withOffset(TagOffset), regs.tagGPR());
            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
        } else {
            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
            load32(address.withOffset(TagOffset), regs.tagGPR());
        }
#endif
    }

    void loadValue(BaseIndex address, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        load64(address, regs.gpr());
#else
        if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) {
            // We actually could handle the case where the registers are aliased to both
            // tag and payload, but we don't for now.
            RELEASE_ASSERT(address.base != regs.tagGPR());
            RELEASE_ASSERT(address.index != regs.tagGPR());

            load32(address.withOffset(TagOffset), regs.tagGPR());
            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
        } else {
            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
            load32(address.withOffset(TagOffset), regs.tagGPR());
        }
#endif
    }

    void moveTrustedValue(JSValue value, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        move(TrustedImm64(JSValue::encode(value)), regs.gpr());
#else
        move(TrustedImm32(value.tag()), regs.tagGPR());
        move(TrustedImm32(value.payload()), regs.payloadGPR());
#endif
    }

    void storeTrustedValue(JSValue value, Address address)
    {
#if USE(JSVALUE64)
        store64(TrustedImm64(JSValue::encode(value)), address);
#else
        store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
        store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
#endif
    }

    void storeTrustedValue(JSValue value, BaseIndex address)
    {
#if USE(JSVALUE64)
        store64(TrustedImm64(JSValue::encode(value)), address);
#else
        store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
        store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
#endif
    }
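
    // The prologue/epilogue helpers below are per-CPU. prologueStackPointerDelta() reports how
    // many bytes emitFunctionPrologue() pushes (the frame pointer, plus the return address on
    // targets where the callee saves the link register); the ...BeforePrologue() helpers further
    // down rely on it to address callee frame slots before the prologue has run.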
#if CPU(X86_64) || CPU(X86)
    static size_t prologueStackPointerDelta()
    {
        // Prologue only saves the framePointerRegister
        return sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        push(framePointerRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        pop(framePointerRegister);
    }

    void preserveReturnAddressAfterCall(GPRReg reg)
    {
        pop(reg);
    }

    void restoreReturnAddressBeforeReturn(GPRReg reg)
    {
        push(reg);
    }

    void restoreReturnAddressBeforeReturn(Address address)
    {
        push(address);
    }
#endif // CPU(X86_64) || CPU(X86)

#if CPU(ARM) || CPU(ARM64)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and linkRegister
        return 2 * sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        pushPair(framePointerRegister, linkRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        popPair(framePointerRegister, linkRegister);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(linkRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, linkRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, linkRegister);
    }
#endif // CPU(ARM) || CPU(ARM64)

#if CPU(MIPS)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and returnAddressRegister
        return 2 * sizeof(void*);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(returnAddressRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, returnAddressRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, returnAddressRegister);
    }
#endif // CPU(MIPS)

#if CPU(SH4)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and link register
        return 2 * sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        push(linkRegister);
        push(framePointerRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        pop(framePointerRegister);
        pop(linkRegister);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        m_assembler.stspr(reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        m_assembler.ldspr(reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtrLinkReg(address);
    }
#endif // CPU(SH4)

    void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
    {
        loadPtr(Address(from, entry * sizeof(Register)), to);
    }
    void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
    {
        load32(Address(from, entry * sizeof(Register)), to);
    }
#if USE(JSVALUE64)
    void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
    {
        load64(Address(from, entry * sizeof(Register)), to);
    }
#endif // USE(JSVALUE64)
    void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to);
    }
    void emitPutCallerFrameToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()));
    }

    void emitPutReturnPCToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }
    void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }

    // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header
    // fields before the code from emitFunctionPrologue() has executed.
    // First, the access is via the stack pointer. Second, the address calculation must also take
    // into account that the stack pointer may not have been adjusted down for the return PC and/or
    // caller's frame pointer. On some platforms, the callee is responsible for pushing the
    // "link register" containing the return address in the function prologue.
#if USE(JSVALUE64)
    void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
    }
#else
    void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    }

    void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    }
#endif

    JumpList branchIfNotEqual(JSValueRegs regs, JSValue value)
    {
#if USE(JSVALUE64)
        return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value)));
#else
        JumpList result;
        result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag())));
        if (value.isEmpty() || value.isUndefinedOrNull())
            return result; // These don't have anything interesting in the payload.
        result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())));
        return result;
#endif
    }

    Jump branchIfEqual(JSValueRegs regs, JSValue value)
    {
#if USE(JSVALUE64)
        return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value)));
#else
        Jump notEqual;
        // These don't have anything interesting in the payload.
        if (!value.isEmpty() && !value.isUndefinedOrNull())
            notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()));
        Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag()));
        if (notEqual.isSet())
            notEqual.link(this);
        return result;
#endif
    }

    Jump branchIfNotCell(GPRReg reg)
    {
#if USE(JSVALUE64)
        return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
#else
        return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
    }
    Jump branchIfNotCell(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchIfNotCell(regs.gpr());
#else
        return branchIfNotCell(regs.tagGPR());
#endif
    }

    Jump branchIfCell(GPRReg reg)
    {
#if USE(JSVALUE64)
        return branchTest64(MacroAssembler::Zero, reg, GPRInfo::tagMaskRegister);
#else
        return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag));
#endif
    }
    Jump branchIfCell(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchIfCell(regs.gpr());
#else
        return branchIfCell(regs.tagGPR());
#endif
    }
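
    // "Other" means null or undefined. On 64-bit those two encodings differ only in the
    // TagBitUndefined bit, so masking it off and comparing against ValueNull matches both; on
    // 32-bit, NullTag and UndefinedTag differ only in their low bit, so or32(1) on the tag does
    // the same job.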
    Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        move(regs.gpr(), tempGPR);
        and64(TrustedImm32(~TagBitUndefined), tempGPR);
        return branch64(Equal, tempGPR, TrustedImm64(ValueNull));
#else
        or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
        return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag));
#endif
    }

    Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        move(regs.gpr(), tempGPR);
        and64(TrustedImm32(~TagBitUndefined), tempGPR);
        return branch64(NotEqual, tempGPR, TrustedImm64(ValueNull));
#else
        or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
        return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag));
#endif
    }

    Jump branchIfInt32(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister);
#else
        return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
#endif
    }

    Jump branchIfNotInt32(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branch64(Below, regs.gpr(), GPRInfo::tagTypeNumberRegister);
#else
        return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
#endif
    }
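
    // In the 32-bit encoding, doubles use every tag below LowestTag while int32 uses the all-ones
    // Int32Tag; adding 1 to the tag folds Int32Tag into the double range, so one unsigned compare
    // against LowestTag + 1 answers "is this a number?".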
    // Note that the tempGPR is not used in 64-bit mode.
    Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        UNUSED_PARAM(tempGPR);
        return branchTest64(NonZero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
#else
        add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
        return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
#endif
    }

    // Note that the tempGPR is not used in 64-bit mode.
    Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        UNUSED_PARAM(tempGPR);
        return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
#else
        add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
        return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
#endif
    }

    // Note that the tempGPR is not used in 32-bit mode.
    Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        move(regs.gpr(), tempGPR);
        xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
        return branchTest64(Zero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
#else
        UNUSED_PARAM(tempGPR);
        return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag));
#endif
    }

    // Note that the tempGPR is not used in 32-bit mode.
    Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        move(regs.gpr(), tempGPR);
        xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
        return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
#else
        UNUSED_PARAM(tempGPR);
        return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag));
#endif
    }

    Jump branchIfObject(GPRReg cellGPR)
    {
        return branch8(
            AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
    }

    Jump branchIfNotObject(GPRReg cellGPR)
    {
        return branch8(
            Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
    }

    Jump branchIfType(GPRReg cellGPR, JSType type)
    {
        return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
    }

    Jump branchIfNotType(GPRReg cellGPR, JSType type)
    {
        return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
    }

    Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); }
    Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); }
    Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); }
    Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); }
    Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); }
    Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); }

    Jump branchIfEmpty(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchTest64(Zero, regs.gpr());
#else
        return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::EmptyValueTag));
#endif
    }

    static Address addressForByteOffset(ptrdiff_t byteOffset)
    {
        return Address(GPRInfo::callFrameRegister, byteOffset);
    }
    static Address addressFor(VirtualRegister virtualRegister, GPRReg baseReg)
    {
        ASSERT(virtualRegister.isValid());
        return Address(baseReg, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(VirtualRegister virtualRegister)
    {
        // NB. It's tempting on some architectures to sometimes use an offset from the stack
        // register because for some offsets that will encode to a smaller instruction. But we
        // cannot do this. We use this in places where the stack pointer has been moved to some
        // unpredictable location.
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(int operand)
    {
        return addressFor(static_cast<VirtualRegister>(operand));
    }

    static Address tagFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset);
    }
    static Address tagFor(int operand)
    {
        return tagFor(static_cast<VirtualRegister>(operand));
    }

    static Address payloadFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
    }
    static Address payloadFor(int operand)
    {
        return payloadFor(static_cast<VirtualRegister>(operand));
    }

    // Access to our fixed callee CallFrame.
    static Address calleeFrameSlot(int slot)
    {
        ASSERT(slot >= JSStack::CallerFrameAndPCSize);
        return Address(stackPointerRegister, sizeof(Register) * (slot - JSStack::CallerFrameAndPCSize));
    }

    // Access to our fixed callee CallFrame.
    static Address calleeArgumentSlot(int argument)
    {
        return calleeFrameSlot(virtualRegisterForArgument(argument).offset());
    }

    static Address calleeFrameTagSlot(int slot)
    {
        return calleeFrameSlot(slot).withOffset(TagOffset);
    }

    static Address calleeFramePayloadSlot(int slot)
    {
        return calleeFrameSlot(slot).withOffset(PayloadOffset);
    }

    static Address calleeArgumentTagSlot(int argument)
    {
        return calleeArgumentSlot(argument).withOffset(TagOffset);
    }

    static Address calleeArgumentPayloadSlot(int argument)
    {
        return calleeArgumentSlot(argument).withOffset(PayloadOffset);
    }

    static Address calleeFrameCallerFrame()
    {
        return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset());
    }
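
    // Picks a temporary register (regT0..regT5) that is none of the registers the caller asks to
    // preserve, so it is safe to clobber in a short emitted sequence.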
    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg, GPRReg preserve5 = InvalidGPRReg)
    {
        if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0 && preserve5 != GPRInfo::regT0)
            return GPRInfo::regT0;

        if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1 && preserve5 != GPRInfo::regT1)
            return GPRInfo::regT1;

        if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2 && preserve5 != GPRInfo::regT2)
            return GPRInfo::regT2;

        if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3 && preserve5 != GPRInfo::regT3)
            return GPRInfo::regT3;

        if (preserve1 != GPRInfo::regT4 && preserve2 != GPRInfo::regT4 && preserve3 != GPRInfo::regT4 && preserve4 != GPRInfo::regT4 && preserve5 != GPRInfo::regT4)
            return GPRInfo::regT4;

        return GPRInfo::regT5;
    }

    // Add a debug call. This call has no effect on JIT code execution state.
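    // It spills every GPR and FPR to a VM scratch buffer, calls function(exec, argument, buffer)
    // with the C calling convention, and then restores all registers from the buffer.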
    void debugCall(V_DebugOperation_EPP function, void* argument)
    {
        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
        ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            store64(GPRInfo::toRegister(i), buffer + i);
#else
            store32(GPRInfo::toRegister(i), buffer + i);
#endif
        }

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
        }

        // Tell GC mark phase how much of the scratch buffer is active during call.
        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);

#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
        move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
        move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
#elif CPU(X86)
        poke(GPRInfo::callFrameRegister, 0);
        poke(TrustedImmPtr(argument), 1);
        poke(TrustedImmPtr(buffer), 2);
        GPRReg scratch = GPRInfo::regT0;
#else
#error "JIT not supported on this platform."
#endif
        move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
        call(scratch);

        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(0), GPRInfo::regT0);

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
        }
        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            load64(buffer + i, GPRInfo::toRegister(i));
#else
            load32(buffer + i, GPRInfo::toRegister(i));
#endif
        }
    }

    // These methods JIT generate dynamic, debug-only checks - akin to ASSERTs.
#if !ASSERT_DISABLED
    void jitAssertIsInt32(GPRReg);
    void jitAssertIsJSInt32(GPRReg);
    void jitAssertIsJSNumber(GPRReg);
    void jitAssertIsJSDouble(GPRReg);
    void jitAssertIsCell(GPRReg);
    void jitAssertHasValidCallFrame();
    void jitAssertIsNull(GPRReg);
    void jitAssertTagsInPlace();
    void jitAssertArgumentCountSane();
#else
    void jitAssertIsInt32(GPRReg) { }
    void jitAssertIsJSInt32(GPRReg) { }
    void jitAssertIsJSNumber(GPRReg) { }
    void jitAssertIsJSDouble(GPRReg) { }
    void jitAssertIsCell(GPRReg) { }
    void jitAssertHasValidCallFrame() { }
    void jitAssertIsNull(GPRReg) { }
    void jitAssertTagsInPlace() { }
    void jitAssertArgumentCountSane() { }
#endif

    void purifyNaN(FPRReg);

    // These methods convert between doubles, and doubles boxed and JSValues.
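    // In the 64-bit value encoding, a boxed double is the raw IEEE bits offset by 2^48;
    // subtracting TagTypeNumber (kept in tagTypeNumberRegister) applies that same offset modulo
    // 2^64, keeping boxed doubles disjoint from cell pointers and boxed int32s, and unboxing adds
    // it back.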
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        moveDoubleTo64(fpr, gpr);
        sub64(GPRInfo::tagTypeNumberRegister, gpr);
        jitAssertIsJSDouble(gpr);
        return gpr;
    }
    FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, FPRReg fpr)
    {
        add64(GPRInfo::tagTypeNumberRegister, gpr);
        move64ToDouble(gpr, fpr);
        return fpr;
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        jitAssertIsJSDouble(gpr);
        return unboxDoubleWithoutAssertions(gpr, fpr);
    }

    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        boxDouble(fpr, regs.gpr());
    }

    // Here are possible arrangements of source, target, scratch:
    // - source, target, scratch can all be separate registers.
    // - source and target can be the same but scratch is separate.
    // - target and scratch can be the same but source is separate.
    void boxInt52(GPRReg source, GPRReg target, GPRReg scratch, FPRReg fpScratch)
    {
        // Is it an int32?
        signExtend32ToPtr(source, scratch);
        Jump isInt32 = branch64(Equal, source, scratch);

        // Nope, it's not, but regT0 contains the int64 value.
        convertInt64ToDouble(source, fpScratch);
        boxDouble(fpScratch, target);
        Jump done = jump();

        isInt32.link(this);
        zeroExtend32ToPtr(source, target);
        or64(GPRInfo::tagTypeNumberRegister, target);

        done.link(this);
    }
#endif // USE(JSVALUE64)

#if USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        moveDoubleToInts(fpr, payloadGPR, tagGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
    }

    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        boxDouble(fpr, regs.tagGPR(), regs.payloadGPR());
    }
#endif // USE(JSVALUE32_64)
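
    // A boolean in boolGPR is 0 or 1. On 64-bit, adding ValueFalse produces the encoded JSValue
    // (ValueTrue == ValueFalse + 1); on 32-bit the payload stays 0/1 and boxBoolean() supplies
    // the BooleanTag.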
    void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR)
    {
#if USE(JSVALUE64)
        add32(TrustedImm32(ValueFalse), boolGPR, payloadGPR);
#else
        move(boolGPR, payloadGPR);
#endif
    }

    void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs)
    {
        boxBooleanPayload(boolGPR, boxedRegs.payloadGPR());
#if USE(JSVALUE32_64)
        move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR());
#endif
    }

    void callExceptionFuzz();

    enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
    enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };
    Jump emitExceptionCheck(
        ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);

#if ENABLE(SAMPLING_COUNTERS)
    static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
    void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
#endif

#if ENABLE(SAMPLING_FLAGS)
    void setSamplingFlag(int32_t);
    void clearSamplingFlag(int32_t flag);
#endif

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return codeBlock()->globalObjectFor(codeOrigin);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return codeBlock()->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    ExecutableBase* executableFor(const CodeOrigin& codeOrigin);

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return baselineCodeBlock();
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlock()
    {
        return m_baselineCodeBlock;
    }

    SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockFor(codeOrigin)->symbolTable();
    }

    static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return VirtualRegister(CallFrame::argumentOffset(0));
        if (inlineCallFrame->arguments.size() <= 1)
            return virtualRegisterForLocal(0);
        ValueRecovery recovery = inlineCallFrame->arguments[1];
        RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
        return recovery.virtualRegister();
    }

    static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin)
    {
        return argumentsStart(codeOrigin.inlineCallFrame);
    }
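
    // On 64-bit, a cell holds a 32-bit StructureID that indexes the VM's StructureIDTable, so
    // loading the Structure* takes an extra indirection through that table; on 32-bit the cell
    // stores the Structure pointer directly.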
    void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
        loadPtr(vm()->heap.structureIDTable().base(), scratch);
        loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
#else
        UNUSED_PARAM(scratch);
        loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
#endif
    }

    static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
        jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch);
        jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
#else
        UNUSED_PARAM(scratch);
        jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
#endif
    }

    void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
    {
        emitStoreStructureWithTypeInfo(*this, structure, dest);
    }
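
    // On 64-bit, the Structure's ID, indexing type, and type-info flags sit in one 64-bit blob
    // starting at structureIDOffset(), so the whole cell header can be initialized with a single
    // load/store pair; the 32-bit path stores the pieces separately.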
    void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch);
        store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#else
        // Store all the info flags using a single 32-bit wide load and store.
        load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch);
        store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));

        // Store the StructureID
        storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#endif
    }

    static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest);
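
    // Write-barrier fast path: the byte at JSCell::gcDataOffset() is non-zero when the cell is
    // in Eden or already in the remembered set, in which case the barrier's slow path can be
    // skipped.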
    Jump jumpIfIsRememberedOrInEden(GPRReg cell)
    {
        return branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(cell, JSCell::gcDataOffset()));
    }

    Jump jumpIfIsRememberedOrInEden(JSCell* cell)
    {
        uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::gcDataOffset();
        return branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(address));
    }

    // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The
    // functor is called at those points where we have pinpointed a type. One way to use this is to
    // have the functor emit the code to put the type string into an appropriate register and then
    // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow
    // case. It is passed the unlinked jump to the slow case.
    template<typename Functor, typename SlowPathFunctor>
    void emitTypeOf(
        JSValueRegs regs, GPRReg tempGPR, const Functor& functor,
        const SlowPathFunctor& slowPathFunctor)
    {
        // Implements the following branching structure:
        //
        // if (is cell) {
        //     if (is object) {
        //         if (is function) {
        //             return function;
        //         } else if (doesn't have call trap and doesn't masquerade as undefined) {
        //             return object
        //         } else {
        //             return slowPath();
        //         }
        //     } else if (is string) {
        //         return string
        //     } else {
        //         return symbol
        //     }
        // } else if (is number) {
        //     return number
        // } else if (is null) {
        //     return object
        // } else if (is boolean) {
        //     return boolean
        // } else {
        //     return undefined
        // }
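
        // As used below, the functor's bool argument is true only for the final (undefined)
        // case, so a functor can use it to decide whether it still needs to jump out.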

        Jump notCell = branchIfNotCell(regs);

        GPRReg cellGPR = regs.payloadGPR();
        Jump notObject = branchIfNotObject(cellGPR);

        Jump notFunction = branchIfNotFunction(cellGPR);
        functor(TypeofType::Function, false);

        notFunction.link(this);
        slowPathFunctor(
            branchTest8(
                NonZero,
                Address(cellGPR, JSCell::typeInfoFlagsOffset()),
                TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData)));
        functor(TypeofType::Object, false);

        notObject.link(this);

        Jump notString = branchIfNotString(cellGPR);
        functor(TypeofType::String, false);
        notString.link(this);
        functor(TypeofType::Symbol, false);

        notCell.link(this);

        Jump notNumber = branchIfNotNumber(regs, tempGPR);
        functor(TypeofType::Number, false);
        notNumber.link(this);

        JumpList notNull = branchIfNotEqual(regs, jsNull());
        functor(TypeofType::Object, false);
        notNull.link(this);

        Jump notBoolean = branchIfNotBoolean(regs, tempGPR);
        functor(TypeofType::Boolean, false);
        notBoolean.link(this);

        functor(TypeofType::Undefined, true);
    }

    Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);

protected:
    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_baselineCodeBlock;

    HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset>> m_decodedCodeMaps;
};

} // namespace JSC

#endif // ENABLE(JIT)

#endif // AssemblyHelpers_h