2 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DFGAssemblyHelpers_h
#define DFGAssemblyHelpers_h

#include <wtf/Platform.h>

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFPRInfo.h"
#include "DFGGPRInfo.h"
#include "MacroAssembler.h"
40 namespace JSC
{ namespace DFG
{
42 typedef void (*V_DFGDebugOperation_EPP
)(ExecState
*, void*, void*);
44 class AssemblyHelpers
: public MacroAssembler
{
46 AssemblyHelpers(VM
* vm
, CodeBlock
* codeBlock
)
48 , m_codeBlock(codeBlock
)
49 , m_baselineCodeBlock(codeBlock
? codeBlock
->baselineVersion() : 0)
52 ASSERT(m_baselineCodeBlock
);
53 ASSERT(!m_baselineCodeBlock
->alternative());
54 ASSERT(m_baselineCodeBlock
->getJITType() == JITCode::BaselineJIT
);
58 CodeBlock
* codeBlock() { return m_codeBlock
; }
59 VM
* vm() { return m_vm
; }
60 AssemblerType_T
& assembler() { return m_assembler
; }
62 #if CPU(X86_64) || CPU(X86)
63 void preserveReturnAddressAfterCall(GPRReg reg
)
68 void restoreReturnAddressBeforeReturn(GPRReg reg
)
73 void restoreReturnAddressBeforeReturn(Address address
)
77 #endif // CPU(X86_64) || CPU(X86)
79 #if CPU(ARM) || CPU(ARM64)
80 ALWAYS_INLINE
void preserveReturnAddressAfterCall(RegisterID reg
)
82 move(linkRegister
, reg
);
85 ALWAYS_INLINE
void restoreReturnAddressBeforeReturn(RegisterID reg
)
87 move(reg
, linkRegister
);
90 ALWAYS_INLINE
void restoreReturnAddressBeforeReturn(Address address
)
92 loadPtr(address
, linkRegister
);
97 ALWAYS_INLINE
void preserveReturnAddressAfterCall(RegisterID reg
)
99 move(returnAddressRegister
, reg
);
102 ALWAYS_INLINE
void restoreReturnAddressBeforeReturn(RegisterID reg
)
104 move(reg
, returnAddressRegister
);
107 ALWAYS_INLINE
void restoreReturnAddressBeforeReturn(Address address
)
109 loadPtr(address
, returnAddressRegister
);
113 void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry
, GPRReg to
)
115 loadPtr(Address(GPRInfo::callFrameRegister
, entry
* sizeof(Register
)), to
);
117 void emitPutToCallFrameHeader(GPRReg from
, JSStack::CallFrameHeaderEntry entry
)
120 store64(from
, Address(GPRInfo::callFrameRegister
, entry
* sizeof(Register
)));
122 store32(from
, Address(GPRInfo::callFrameRegister
, entry
* sizeof(Register
)));
126 void emitPutImmediateToCallFrameHeader(void* value
, JSStack::CallFrameHeaderEntry entry
)
128 storePtr(TrustedImmPtr(value
), Address(GPRInfo::callFrameRegister
, entry
* sizeof(Register
)));
131 Jump
branchIfNotCell(GPRReg reg
)
134 return branchTest64(MacroAssembler::NonZero
, reg
, GPRInfo::tagMaskRegister
);
136 return branch32(MacroAssembler::NotEqual
, reg
, TrustedImm32(JSValue::CellTag
));
140 static Address
addressFor(VirtualRegister virtualRegister
)
142 return Address(GPRInfo::callFrameRegister
, virtualRegister
* sizeof(Register
));
144 static Address
addressFor(int operand
)
146 return addressFor(static_cast<VirtualRegister
>(operand
));
149 static Address
tagFor(VirtualRegister virtualRegister
)
151 return Address(GPRInfo::callFrameRegister
, virtualRegister
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
153 static Address
tagFor(int operand
)
155 return tagFor(static_cast<VirtualRegister
>(operand
));
158 static Address
payloadFor(VirtualRegister virtualRegister
)
160 return Address(GPRInfo::callFrameRegister
, virtualRegister
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
162 static Address
payloadFor(int operand
)
164 return payloadFor(static_cast<VirtualRegister
>(operand
));
167 Jump
branchIfNotObject(GPRReg structureReg
)
169 return branch8(Below
, Address(structureReg
, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType
));
172 static GPRReg
selectScratchGPR(GPRReg preserve1
= InvalidGPRReg
, GPRReg preserve2
= InvalidGPRReg
, GPRReg preserve3
= InvalidGPRReg
, GPRReg preserve4
= InvalidGPRReg
)
174 if (preserve1
!= GPRInfo::regT0
&& preserve2
!= GPRInfo::regT0
&& preserve3
!= GPRInfo::regT0
&& preserve4
!= GPRInfo::regT0
)
175 return GPRInfo::regT0
;
177 if (preserve1
!= GPRInfo::regT1
&& preserve2
!= GPRInfo::regT1
&& preserve3
!= GPRInfo::regT1
&& preserve4
!= GPRInfo::regT1
)
178 return GPRInfo::regT1
;
180 if (preserve1
!= GPRInfo::regT2
&& preserve2
!= GPRInfo::regT2
&& preserve3
!= GPRInfo::regT2
&& preserve4
!= GPRInfo::regT2
)
181 return GPRInfo::regT2
;
183 if (preserve1
!= GPRInfo::regT3
&& preserve2
!= GPRInfo::regT3
&& preserve3
!= GPRInfo::regT3
&& preserve4
!= GPRInfo::regT3
)
184 return GPRInfo::regT3
;
186 return GPRInfo::regT4
;
189 // Add a debug call. This call has no effect on JIT code execution state.
190 void debugCall(V_DFGDebugOperation_EPP function
, void* argument
)
192 size_t scratchSize
= sizeof(EncodedJSValue
) * (GPRInfo::numberOfRegisters
+ FPRInfo::numberOfRegisters
);
193 ScratchBuffer
* scratchBuffer
= m_vm
->scratchBufferForSize(scratchSize
);
194 EncodedJSValue
* buffer
= static_cast<EncodedJSValue
*>(scratchBuffer
->dataBuffer());
196 for (unsigned i
= 0; i
< GPRInfo::numberOfRegisters
; ++i
) {
198 store64(GPRInfo::toRegister(i
), buffer
+ i
);
200 store32(GPRInfo::toRegister(i
), buffer
+ i
);
204 for (unsigned i
= 0; i
< FPRInfo::numberOfRegisters
; ++i
) {
205 move(TrustedImmPtr(buffer
+ GPRInfo::numberOfRegisters
+ i
), GPRInfo::regT0
);
206 storeDouble(FPRInfo::toRegister(i
), GPRInfo::regT0
);
209 // Tell GC mark phase how much of the scratch buffer is active during call.
210 move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), GPRInfo::regT0
);
211 storePtr(TrustedImmPtr(scratchSize
), GPRInfo::regT0
);
213 #if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS)
214 move(TrustedImmPtr(buffer
), GPRInfo::argumentGPR2
);
215 move(TrustedImmPtr(argument
), GPRInfo::argumentGPR1
);
216 move(GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
217 GPRReg scratch
= selectScratchGPR(GPRInfo::argumentGPR0
, GPRInfo::argumentGPR1
);
219 poke(GPRInfo::callFrameRegister
, 0);
220 poke(TrustedImmPtr(argument
), 1);
221 poke(TrustedImmPtr(buffer
), 2);
222 GPRReg scratch
= GPRInfo::regT0
;
224 #error "DFG JIT not supported on this platform."
226 move(TrustedImmPtr(reinterpret_cast<void*>(function
)), scratch
);
229 move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), GPRInfo::regT0
);
230 storePtr(TrustedImmPtr(0), GPRInfo::regT0
);
232 for (unsigned i
= 0; i
< FPRInfo::numberOfRegisters
; ++i
) {
233 move(TrustedImmPtr(buffer
+ GPRInfo::numberOfRegisters
+ i
), GPRInfo::regT0
);
234 loadDouble(GPRInfo::regT0
, FPRInfo::toRegister(i
));
236 for (unsigned i
= 0; i
< GPRInfo::numberOfRegisters
; ++i
) {
238 load64(buffer
+ i
, GPRInfo::toRegister(i
));
240 load32(buffer
+ i
, GPRInfo::toRegister(i
));
245 // These methods JIT generate dynamic, debug-only checks - akin to ASSERTs.
246 #if DFG_ENABLE(JIT_ASSERT)
247 void jitAssertIsInt32(GPRReg
);
248 void jitAssertIsJSInt32(GPRReg
);
249 void jitAssertIsJSNumber(GPRReg
);
250 void jitAssertIsJSDouble(GPRReg
);
251 void jitAssertIsCell(GPRReg
);
252 void jitAssertHasValidCallFrame();
254 void jitAssertIsInt32(GPRReg
) { }
255 void jitAssertIsJSInt32(GPRReg
) { }
256 void jitAssertIsJSNumber(GPRReg
) { }
257 void jitAssertIsJSDouble(GPRReg
) { }
258 void jitAssertIsCell(GPRReg
) { }
259 void jitAssertHasValidCallFrame() { }
262 // These methods convert between doubles, and doubles boxed and JSValues.
264 GPRReg
boxDouble(FPRReg fpr
, GPRReg gpr
)
266 moveDoubleTo64(fpr
, gpr
);
267 sub64(GPRInfo::tagTypeNumberRegister
, gpr
);
268 jitAssertIsJSDouble(gpr
);
271 FPRReg
unboxDouble(GPRReg gpr
, FPRReg fpr
)
273 jitAssertIsJSDouble(gpr
);
274 add64(GPRInfo::tagTypeNumberRegister
, gpr
);
275 move64ToDouble(gpr
, fpr
);
280 #if USE(JSVALUE32_64)
281 void boxDouble(FPRReg fpr
, GPRReg tagGPR
, GPRReg payloadGPR
)
283 moveDoubleToInts(fpr
, payloadGPR
, tagGPR
);
285 void unboxDouble(GPRReg tagGPR
, GPRReg payloadGPR
, FPRReg fpr
, FPRReg scratchFPR
)
287 moveIntsToDouble(payloadGPR
, tagGPR
, fpr
, scratchFPR
);
291 enum ExceptionCheckKind
{ NormalExceptionCheck
, InvertedExceptionCheck
};
292 Jump
emitExceptionCheck(ExceptionCheckKind kind
= NormalExceptionCheck
)
295 return branchTest64(kind
== NormalExceptionCheck
? NonZero
: Zero
, AbsoluteAddress(&vm()->exception
));
296 #elif USE(JSVALUE32_64)
297 return branch32(kind
== NormalExceptionCheck
? NotEqual
: Equal
, AbsoluteAddress(reinterpret_cast<char*>(&vm()->exception
) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), TrustedImm32(JSValue::EmptyValueTag
));
301 #if ENABLE(SAMPLING_COUNTERS)
302 static void emitCount(MacroAssembler
& jit
, AbstractSamplingCounter
& counter
, int32_t increment
= 1)
304 jit
.add64(TrustedImm32(increment
), AbsoluteAddress(counter
.addressOfCounter()));
306 void emitCount(AbstractSamplingCounter
& counter
, int32_t increment
= 1)
308 add64(TrustedImm32(increment
), AbsoluteAddress(counter
.addressOfCounter()));
312 #if ENABLE(SAMPLING_FLAGS)
313 void setSamplingFlag(int32_t);
314 void clearSamplingFlag(int32_t flag
);
317 JSGlobalObject
* globalObjectFor(CodeOrigin codeOrigin
)
319 return codeBlock()->globalObjectFor(codeOrigin
);
322 bool strictModeFor(CodeOrigin codeOrigin
)
324 if (!codeOrigin
.inlineCallFrame
)
325 return codeBlock()->isStrictMode();
326 return jsCast
<FunctionExecutable
*>(codeOrigin
.inlineCallFrame
->executable
.get())->isStrictMode();
329 ExecutableBase
* executableFor(const CodeOrigin
& codeOrigin
);
331 CodeBlock
* baselineCodeBlockFor(const CodeOrigin
& codeOrigin
)
333 return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin
, baselineCodeBlock());
336 CodeBlock
* baselineCodeBlockFor(InlineCallFrame
* inlineCallFrame
)
338 if (!inlineCallFrame
)
339 return baselineCodeBlock();
340 return baselineCodeBlockForInlineCallFrame(inlineCallFrame
);
343 CodeBlock
* baselineCodeBlock()
345 return m_baselineCodeBlock
;
348 int argumentsRegisterFor(InlineCallFrame
* inlineCallFrame
)
350 if (!inlineCallFrame
)
351 return codeBlock()->argumentsRegister();
353 return baselineCodeBlockForInlineCallFrame(
354 inlineCallFrame
)->argumentsRegister() + inlineCallFrame
->stackOffset
;
357 int argumentsRegisterFor(const CodeOrigin
& codeOrigin
)
359 return argumentsRegisterFor(codeOrigin
.inlineCallFrame
);
362 SharedSymbolTable
* symbolTableFor(const CodeOrigin
& codeOrigin
)
364 return baselineCodeBlockFor(codeOrigin
)->symbolTable();
367 int offsetOfLocals(const CodeOrigin
& codeOrigin
)
369 if (!codeOrigin
.inlineCallFrame
)
371 return codeOrigin
.inlineCallFrame
->stackOffset
* sizeof(Register
);
374 int offsetOfArgumentsIncludingThis(const CodeOrigin
& codeOrigin
)
376 if (!codeOrigin
.inlineCallFrame
)
377 return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register
);
378 return (codeOrigin
.inlineCallFrame
->stackOffset
+ CallFrame::argumentOffsetIncludingThis(0)) * sizeof(Register
);
381 Vector
<BytecodeAndMachineOffset
>& decodedCodeMapFor(CodeBlock
*);
385 CodeBlock
* m_codeBlock
;
386 CodeBlock
* m_baselineCodeBlock
;
388 HashMap
<CodeBlock
*, Vector
<BytecodeAndMachineOffset
> > m_decodedCodeMaps
;
391 } } // namespace JSC::DFG
393 #endif // ENABLE(DFG_JIT)
395 #endif // DFGAssemblyHelpers_h