/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGThunks.h"

#if ENABLE(DFG_JIT)

#include "DFGCCallHelpers.h"
#include "DFGFPRInfo.h"
#include "DFGGPRInfo.h"
#include "DFGOSRExitCompiler.h"
#include "MacroAssembler.h"

namespace JSC { namespace DFG {

MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
{
    MacroAssembler jit;

    size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(scratchSize);
    EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
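
    // The buffer holds one EncodedJSValue-sized slot per register: all GPRs
    // first (buffer + i), then all FPRs (buffer + GPRInfo::numberOfRegisters + i).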
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        jit.store64(GPRInfo::toRegister(i), buffer + i);
#else
        jit.store32(GPRInfo::toRegister(i), buffer + i);
#endif
    }
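    // storeDouble() takes its destination address from a register. Every GPR
    // was just saved above, so regT0 is free to serve as the scratch pointer.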
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        jit.storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
    }

    // Tell GC mark phase how much of the scratch buffer is active during call.
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
    jit.storePtr(MacroAssembler::TrustedImmPtr(scratchSize), GPRInfo::regT0);

    // Set up one argument.
#if CPU(X86)
    jit.poke(GPRInfo::callFrameRegister, 0);
#else
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif
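
    // The call target is left blank here; it is bound to compileOSRExit at
    // link time via patchBuffer.link() below.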
    MacroAssembler::Call functionCall = jit.call();

    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
    jit.storePtr(MacroAssembler::TrustedImmPtr(0), GPRInfo::regT0);

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        jit.loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
    }
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        jit.load64(buffer + i, GPRInfo::toRegister(i));
#else
        jit.load32(buffer + i, GPRInfo::toRegister(i));
#endif
    }
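
    // compileOSRExit (called through functionCall above) deposits the address
    // of the compiled exit ramp in vm->osrExitJumpDestination; jump there now
    // that every register has been restored.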
    jit.jump(MacroAssembler::AbsoluteAddress(&vm->osrExitJumpDestination));

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    patchBuffer.link(functionCall, compileOSRExit);

    return FINALIZE_CODE(patchBuffer, ("DFG OSR exit generation thunk"));
}
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
#if !ASSERT_DISABLED
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.breakpoint();
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
#else
    UNUSED_PARAM(jit);
    UNUSED_PARAM(pointerGPR);
#endif
}

MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // We will jump to here if the JIT code thinks it's making a call, but the
    // linking helper (C++ code) decided to throw an exception instead. We will
    // have saved the callReturnIndex in the first arguments of JITStackFrame.
    // Note that the return address will be on the stack at this point, so we
    // need to remove it and drop it on the floor, since we don't care about it.
    // Finally note that the call frame register points at the callee frame, so
    // we need to pop it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::CallerFrame),
        GPRInfo::callFrameRegister);
#if USE(JSVALUE64)
    jit.peek64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#else
    jit.peek(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#endif
    jit.setupArgumentsWithExecState(GPRInfo::nonPreservedNonReturnGPR);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::returnValueGPR2);
    jit.jump(GPRInfo::returnValueGPR2);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("DFG throw exception from call slow path thunk"));
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_DFGOperation_E slowPathFunction)
{
    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
    emitPointerValidation(jit, GPRInfo::nonArgGPR2);
    jit.storePtr(
        GPRInfo::nonArgGPR2,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if USE(JSVALUE64)
    jit.poke64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#else
    jit.poke(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#endif
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC),
        GPRInfo::nonPreservedNonReturnGPR);
    jit.storePtr(
        CCallHelpers::TrustedImmPtr(0),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
    emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.jump(GPRInfo::returnValueGPR);
}

static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in nonArgGPR0/nonArgGPR1 (payload/tag), the call frame to have already
    // been adjusted, nonPreservedNonReturnGPR holds the exception handler index,
    // and all other registers to be available for use. We use JITStackFrame::args
    // to save important information across calls.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, kind == CodeForCall ? operationLinkCall : operationLinkConstruct);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("DFG link %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct);
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkClosureCall);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("DFG link closure call slow path thunk"));
}

static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind)
{
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate DFG operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
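
    // First, check that the callee is at least a cell: on 64-bit, any bits under
    // the tag mask mean "not a cell"; on 32-bit, the tag word must be CellTag.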
#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::nonArgGPR1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    jit.loadPtr(CCallHelpers::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(&JSFunction::s_info)));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfExecutable()),
        GPRInfo::nonArgGPR2);
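    // A negative parameter count is the "not compiled for this kind yet"
    // sentinel, so bail to the slow path in that case.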
    slowCase.append(
        jit.branch32(
            CCallHelpers::LessThan,
            CCallHelpers::Address(
                GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
            CCallHelpers::TrustedImm32(0)));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
        GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
    jit.store64(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
#else
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    jit.store32(
        CCallHelpers::TrustedImm32(JSValue::CellTag),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif
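
    // Load the entry point that includes the arity check, since nothing on
    // this fast path has verified the argument count.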
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
        GPRInfo::regT0);

    // Make a tail call. This will return back to DFG code.
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.jump(GPRInfo::regT0);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("DFG virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}

MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)