/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGThunks.h"

#if ENABLE(DFG_JIT)

#include "DFGCCallHelpers.h"
#include "DFGFPRInfo.h"
#include "DFGGPRInfo.h"
#include "DFGOSRExitCompiler.h"
#include "MacroAssembler.h"

namespace JSC { namespace DFG {

MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
{
    MacroAssembler jit;

    size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(scratchSize);
    EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

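    // Save every GPR and FPR into the scratch buffer. The thunk is entered with
    // live register state that the exit machinery must preserve, and the C++
    // call below is free to clobber any of these registers.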
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        jit.store64(GPRInfo::toRegister(i), buffer + i);
#else
        jit.store32(GPRInfo::toRegister(i), buffer + i);
#endif
    }
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        jit.storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
    }

    // Tell GC mark phase how much of the scratch buffer is active during call.
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
    jit.storePtr(MacroAssembler::TrustedImmPtr(scratchSize), GPRInfo::regT0);

    // Set up one argument.
#if CPU(X86)
    jit.poke(GPRInfo::callFrameRegister, 0);
#else
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif

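    // Call out to C++. The call target is bound to compileOSRExit by the
    // patchBuffer.link() below.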
    MacroAssembler::Call functionCall = jit.call();

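    // The C++ call is done: mark the scratch buffer as inactive again, then
    // restore every register exactly as it was saved above.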
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
    jit.storePtr(MacroAssembler::TrustedImmPtr(0), GPRInfo::regT0);

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        jit.loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
    }
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        jit.load64(buffer + i, GPRInfo::toRegister(i));
#else
        jit.load32(buffer + i, GPRInfo::toRegister(i));
#endif
    }

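    // Jump to wherever the exit compiler told us to go: compileOSRExit is
    // expected to have stored the address of the compiled exit ramp in
    // vm->osrExitJumpDestination before returning.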
    jit.jump(MacroAssembler::AbsoluteAddress(&vm->osrExitJumpDestination));

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    patchBuffer.link(functionCall, compileOSRExit);

    return FINALIZE_CODE(patchBuffer, ("DFG OSR exit generation thunk"));
}

inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
#if !ASSERT_DISABLED
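    // Debug-only sanity check: trap if the pointer is null, then load a byte
    // through it so that a garbage pointer faults here, close to the bug,
    // rather than at some far-away indirect jump.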
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.breakpoint();
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
#else
    UNUSED_PARAM(jit);
    UNUSED_PARAM(pointerGPR);
#endif
}

MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // We will jump to here if the JIT code thinks it's making a call, but the
    // linking helper (C++ code) decided to throw an exception instead. We will
    // have saved the callReturnIndex in the first argument slot of JITStackFrame.
    // Note that the return address will be on the stack at this point, so we
    // need to remove it and drop it on the floor, since we don't care about it.
    // Finally, note that the call frame register points at the callee frame, so
    // we need to pop it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::CallerFrame),
        GPRInfo::callFrameRegister);
#if USE(JSVALUE64)
    jit.peek64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#else
    jit.peek(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#endif
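    // Hand the callReturnIndex we just peeked to lookupExceptionHandler. The
    // lookup is expected to leave the address of the catch machine code in
    // returnValueGPR2, which we validate and then tail-jump to.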
    jit.setupArgumentsWithExecState(GPRInfo::nonPreservedNonReturnGPR);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::returnValueGPR2);
    jit.jump(GPRInfo::returnValueGPR2);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("DFG throw exception from call slow path thunk"));
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_DFGOperation_E slowPathFunction)
{
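    // Shared tail for the link/virtual call thunks: stash the return address in
    // the frame's ReturnPC slot, call the C++ slow path function, then tail-jump
    // to whatever machine code address that function hands back (see below).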
    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
    emitPointerValidation(jit, GPRInfo::nonArgGPR2);
    jit.storePtr(
        GPRInfo::nonArgGPR2,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if USE(JSVALUE64)
    jit.poke64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#else
    jit.poke(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#endif
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC),
        GPRInfo::nonPreservedNonReturnGPR);
    jit.storePtr(
        CCallHelpers::TrustedImmPtr(0),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
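    // We reloaded the return address from the ReturnPC slot above (the C++ slow
    // path may have updated it) and cleared the slot so a stale return PC can't
    // be mistaken for live data. Now put the return address back where the
    // architecture expects it and tail-jump to the code the slow path chose.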
    emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.jump(GPRInfo::returnValueGPR);
}

static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in nonArgGPR0/nonArgGPR1 (payload/tag), the call frame to have already
    // been adjusted, nonPreservedNonReturnGPR to hold the exception handler index,
    // and all other registers to be available for use. We use JITStackFrame::args
    // to save important information across calls.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, kind == CodeForCall ? operationLinkCall : operationLinkConstruct);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("DFG link %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct);
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkClosureCall);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("DFG link closure call slow path thunk"));
}

static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind)
{
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate DFG operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

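    // First check that the callee is a cell at all. Under JSVALUE64 a value is a
    // cell exactly when none of the tag-mask bits are set; under JSVALUE32_64 we
    // compare the tag word against CellTag instead.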
#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::nonArgGPR1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
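    // The callee is a cell. Check its class via the structure: anything that is
    // not exactly a JSFunction takes the slow path.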
    jit.loadPtr(CCallHelpers::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(&JSFunction::s_info)));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfExecutable()),
        GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branch32(
            CCallHelpers::LessThan,
            CCallHelpers::Address(
                GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
            CCallHelpers::TrustedImm32(0)));
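    // A negative parameter count appears to be the executable's sentinel for
    // "no JIT code compiled for this specialization yet", so it also routes to
    // the slow path, which can trigger compilation.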

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

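    // The frame's scope chain slot is filled in here, from the callee itself: a
    // virtual call site doesn't know its callee statically, so it could not have
    // set up the scope chain on its own.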
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
        GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
    jit.store64(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
#else
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    jit.store32(
        CCallHelpers::TrustedImm32(JSValue::CellTag),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif

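    // Fetch the callee's machine code entry point. This is the variant that
    // includes the arity check, presumably because nothing on this fast path has
    // verified the argument count.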
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
        GPRInfo::regT0);

    // Make a tail call. This will return back to DFG code.
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.jump(GPRInfo::regT0);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("DFG virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}

MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)