]> git.saurik.com Git - apple/javascriptcore.git/blob - dfg/DFGJITCompiler.cpp
JavaScriptCore-1097.3.3.tar.gz
[apple/javascriptcore.git] / dfg / DFGJITCompiler.cpp
1 /*
2 * Copyright (C) 2011 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "DFGJITCompiler.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "CodeBlock.h"
32 #include "DFGOSRExitCompiler.h"
33 #include "DFGOperations.h"
34 #include "DFGRegisterBank.h"
35 #include "DFGSpeculativeJIT.h"
36 #include "DFGThunks.h"
37 #include "JSGlobalData.h"
38 #include "LinkBuffer.h"
39
40 namespace JSC { namespace DFG {
41
42 void JITCompiler::linkOSRExits()
43 {
44 for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
45 OSRExit& exit = codeBlock()->osrExit(i);
46 exit.m_check.initialJump().link(this);
47 jitAssertHasValidCallFrame();
48 store32(TrustedImm32(i), &globalData()->osrExitIndex);
49 exit.m_check.switchToLateJump(patchableJump());
50 }
51 }
52
void JITCompiler::compileEntry()
{
    // Emit the common function prologue: capture the machine return address
    // into the call frame header (the DFG does not keep it on the machine
    // stack) and record the owning CodeBlock in the header as well.
    //
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast register file check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the register file
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
}
66
void JITCompiler::compileBody(SpeculativeJIT& speculative)
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

#if DFG_ENABLE(JIT_BREAK_ON_EVERY_FUNCTION)
    // Handy debug tool!
    breakpoint();
#endif

    // Bump the counter of entries into speculatively-compiled code; read by
    // the profiling machinery to judge whether speculation is paying off.
    addPtr(TrustedImm32(1), AbsoluteAddress(codeBlock()->addressOfSpeculativeSuccessCounter()));

    bool compiledSpeculative = speculative.compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);

    // Link the jumps of the OSR exits planted during speculative compilation.
    linkOSRExits();

    // Iterate over the m_exceptionChecks vector, checking for jumps to link.
    // (Any check whose jump was set fired on an exception path and must be
    // routed to the handler-lookup code below.)
    bool didLinkExceptionCheck = false;
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        Jump& exceptionCheck = m_exceptionChecks[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            didLinkExceptionCheck = true;
        }
    }

    // If any exception checks were linked, generate code to lookup a handler.
    if (didLinkExceptionCheck) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // the index into the CodeBlock's callReturnIndexVector corresponding to the
        // call that threw the exception (this was set in nonPreservedNonReturnGPR, when
        // the exception check was planted).
        move(GPRInfo::nonPreservedNonReturnGPR, GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        // On 32-bit x86 the C calling convention passes arguments on the stack,
        // so the two arguments are poked into the outgoing argument slots.
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }
}
113
114 void JITCompiler::link(LinkBuffer& linkBuffer)
115 {
116 // Link the code, populate data in CodeBlock data structures.
117 #if DFG_ENABLE(DEBUG_VERBOSE)
118 dataLog("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
119 #endif
120
121 // Link all calls out from the JIT code to their respective functions.
122 for (unsigned i = 0; i < m_calls.size(); ++i)
123 linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
124
125 if (m_codeBlock->needsCallReturnIndices()) {
126 m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
127 for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
128 unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
129 CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
130 while (codeOrigin.inlineCallFrame)
131 codeOrigin = codeOrigin.inlineCallFrame->caller;
132 unsigned exceptionInfo = codeOrigin.bytecodeIndex;
133 m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
134 }
135 }
136
137 Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
138 codeOrigins.resize(m_exceptionChecks.size());
139
140 for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
141 CallExceptionRecord& record = m_exceptionChecks[i];
142 unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
143 codeOrigins[i].codeOrigin = record.m_codeOrigin;
144 codeOrigins[i].callReturnOffset = returnAddressOffset;
145 record.m_token.assertCodeOriginIndex(i);
146 }
147
148 m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
149 for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
150 StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
151 CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
152 info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
153 info.callReturnLocation = callReturnLocation;
154 info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
155 info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
156 #if USE(JSVALUE64)
157 info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToLoadOrStore));
158 #else
159 info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToTagLoadOrStore));
160 info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToPayloadLoadOrStore));
161 #endif
162 info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToSlowCase));
163 info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToDone));
164 info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
165 #if USE(JSVALUE64)
166 info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
167 #else
168 info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
169 info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
170 #endif
171 info.patch.dfg.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
172 info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
173 }
174
175 m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
176 for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
177 CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
178 info.callType = m_jsCalls[i].m_callType;
179 info.isDFG = true;
180 info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
181 info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
182 info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
183 }
184
185 MacroAssemblerCodeRef osrExitThunk = globalData()->getCTIStub(osrExitGenerationThunkGenerator);
186 CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
187 for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
188 OSRExit& exit = codeBlock()->osrExit(i);
189 linkBuffer.link(exit.m_check.lateJump(), target);
190 exit.m_check.correctLateJump(linkBuffer);
191 }
192
193 codeBlock()->shrinkWeakReferencesToFit();
194 codeBlock()->shrinkWeakReferenceTransitionsToFit();
195 }
196
197 bool JITCompiler::compile(JITCode& entry)
198 {
199 compileEntry();
200 SpeculativeJIT speculative(*this);
201 compileBody(speculative);
202
203 // Create OSR entry trampolines if necessary.
204 speculative.createOSREntries();
205
206 LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock, JITCompilationCanFail);
207 if (linkBuffer.didFailToAllocate())
208 return false;
209 link(linkBuffer);
210 speculative.linkOSREntries(linkBuffer);
211
212 entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
213 return true;
214 }
215
bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    // Compile a function code block. Produces two entry points: the fast one
    // (returned in 'entry', used when the argument count is statically known
    // to be correct) and one that performs an arity check first (returned in
    // 'entryWithArityCheck'). Returns false if executable memory allocation
    // fails.
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();


    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);

    // === Function footer code generation ===
    //
    // Generate code to perform the slow register file check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    // beginCall/notifyCall bracket the stub call so its return address and
    // code origin are recorded for exception handling.
    CallBeginToken token = beginCall();
    Call callRegisterFileCheck = call();
    notifyCall(callRegisterFileCheck, CodeOrigin(0), token);
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    // If enough arguments were passed, fall through to the normal prologue;
    // otherwise call out to the arity-check stub, which returns the (possibly
    // relocated) call frame in regT0.
    load32(AssemblyHelpers::payloadFor((VirtualRegister)RegisterFile::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    token = beginCall();
    Call callArityCheck = call();
    notifyCall(callArityCheck, CodeOrigin(0), token);
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();


    // === Link ===
    LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    // FIXME: switch the register file check & arity check over to DFGOpertaion style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
    return true;
}
291
292 } } // namespace JSC::DFG
293
294 #endif // ENABLE(DFG_JIT)