/*
 * Copyright (C) 2011, 2013, 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSREntry.h"

#if ENABLE(DFG_JIT)

#include "CallFrame.h"
#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "DFGNode.h"
#include "JIT.h"
#include "JSStackInlines.h"
#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>

namespace JSC { namespace DFG {

void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print("bc#", m_bytecodeIndex, ", machine code offset = ", m_machineCodeOffset);
    out.print(", stack rules = [");

    auto printOperand = [&] (VirtualRegister reg) {
        out.print(inContext(m_expectedValues.operand(reg), context), " (");
        VirtualRegister toReg;
        bool overwritten = false;
        for (OSREntryReshuffling reshuffling : m_reshufflings) {
            if (reg == VirtualRegister(reshuffling.fromOffset)) {
                toReg = VirtualRegister(reshuffling.toOffset);
                break;
            }
            if (reg == VirtualRegister(reshuffling.toOffset))
                overwritten = true;
        }
        if (!overwritten && !toReg.isValid())
            toReg = reg;
        if (toReg.isValid()) {
            if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
                out.print("ignored");
            else
                out.print("maps to ", toReg);
        } else
            out.print("overwritten");
        if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
            out.print(", forced double");
        if (reg.isLocal() && m_localsForcedMachineInt.get(reg.toLocal()))
            out.print(", forced machine int");
        out.print(")");
    };

    CommaPrinter comma;
    for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
        out.print(comma, "arg", argumentIndex, ":");
        printOperand(virtualRegisterForArgument(argumentIndex));
    }
    for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
        out.print(comma, "loc", localIndex, ":");
        printOperand(virtualRegisterForLocal(localIndex));
    }

    out.print("], machine stack used = ", m_machineStackUsed);
}

void OSREntryData::dump(PrintStream& out) const
{
    dumpInContext(out, nullptr);
}

void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
    ASSERT(!codeBlock->jitCodeMap());

    if (!Options::enableOSREntryToDFG())
        return 0;

    if (Options::verboseOSR()) {
        dataLog(
            "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
            " from bc#", bytecodeIndex, "\n");
    }

    VM* vm = &exec->vm();

    sanitizeStackForVM(vm);

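    // A nonzero bytecodeIndex means this is OSR at a loop header rather than
    // entry at the start of the function; record the attempt on the executable.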
    if (bytecodeIndex)
        codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true);

    if (codeBlock->jitType() != JITCode::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT);

        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foos on the stack, let's call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.

        if (Options::verboseOSR())
            dataLog("    OSR failed because the target code block is not DFG.\n");
        return 0;
    }

    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);

    if (!entry) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because the entrypoint was optimized out.\n");
        return 0;
    }

    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);

    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().

    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.

    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        if (argument >= exec->argumentCountIncludingThis()) {
            if (Options::verboseOSR()) {
                dataLogF("    OSR failed because argument %zu was not passed, expected ", argument);
                entry->m_expectedValues.argument(argument).dump(WTF::dataFile());
                dataLogF(".\n");
            }
            return 0;
        }

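        // Argument 0 is the |this| value; explicit arguments start at index 1.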
        JSValue value;
        if (!argument)
            value = exec->thisValue();
        else
            value = exec->argument(argument - 1);

        if (!entry->m_expectedValues.argument(argument).validate(value)) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because argument ", argument, " is ", value,
                    ", expected ", entry->m_expectedValues.argument(argument), ".\n");
            }
            return 0;
        }
    }

    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        if (entry->m_localsForcedDouble.get(local)) {
            if (!exec->registers()[localOffset].jsValue().isNumber()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].jsValue(), ", expected number.\n");
                }
                return 0;
            }
            continue;
        }
        if (entry->m_localsForcedMachineInt.get(local)) {
            if (!exec->registers()[localOffset].jsValue().isMachineInt()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].jsValue(), ", expected ",
                        "machine int.\n");
                }
                return 0;
            }
            continue;
        }
        if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].jsValue())) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because variable ", localOffset, " is ",
                    exec->registers()[localOffset].jsValue(), ", expected ",
                    entry->m_expectedValues.local(local), ".\n");
            }
            return 0;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running, albeit less quickly.

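    // requiredRegisterCountForExecutionAndExit() accounts for both DFG execution
    // and OSR exit, so checking that the stack can reach the deepest slot,
    // local (frameSizeForCheck - 1), suffices.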
    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm->interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()])) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because stack growth failed.\n");
        return 0;
    }

    if (Options::verboseOSR())
        dataLogF("    OSR should succeed.\n");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.

    // 3) Set up the data in the scratch buffer and perform data format conversions.

    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

    Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + JSStack::CallFrameHeaderSize + maxFrameSize))->dataBuffer());

    *bitwise_cast<size_t*>(scratch + 0) = frameSize;

    void* targetPC = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset);
    if (Options::verboseOSR())
        dataLogF("    OSR using target PC %p.\n", targetPC);
    RELEASE_ASSERT(targetPC);
    *bitwise_cast<void**>(scratch + 1) = targetPC;

    Register* pivot = scratch + 2 + JSStack::CallFrameHeaderSize;
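
    // Scratch buffer layout: scratch[0] is the DFG frame size, scratch[1] is the
    // machine code address to jump to, and the remaining slots form an image of
    // the new call frame (header + locals), filled in below. pivot[i] holds the
    // value for VirtualRegister(-1 - i), so pivot[local] lines up with
    // virtualRegisterForLocal(local) and header slot h sits at pivot[-1 - h].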

    for (int index = -JSStack::CallFrameHeaderSize; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);

        if (reg.isLocal()) {
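            // Locals that the DFG keeps unboxed are converted here. Doubles are
            // stored as raw IEEE values rather than boxed JSValues.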
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].jsValue().asNumber();
                continue;
            }

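            // Machine-int locals are stored in the DFG's Int52 format: the
            // integer value shifted left by JSValue::int52ShiftAmount.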
            if (entry->m_localsForcedMachineInt.get(reg.toLocal())) {
                *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].jsValue().asMachineInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }

        pivot[index] = exec->registers()[reg.offset()].jsValue();
    }

    // 4) Reshuffle those registers that need reshuffling.
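    //    Read all sources into temporaries before writing any destination, so
    //    overlapping from/to offsets cannot clobber values mid-move.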
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].jsValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];

    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }

    // 6) Fix the call frame to have the right code block.

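    // Header slot h of the frame image lives at pivot[-1 - h], so this store
    // targets the header's CodeBlock slot.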
    *bitwise_cast<CodeBlock**>(pivot - 1 - JSStack::CodeBlock) = codeBlock;

    if (Options::verboseOSR())
        dataLogF("    OSR returning data buffer %p.\n", scratch);
    return scratch;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)