/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGSpeculativeJIT.h"
#include "JSCInlines.h"
#include "VirtualRegister.h"

#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    m_jit.jitAssertTagsInPlace();

    // Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

    // Perform speculation recovery. This only comes into play when an operation
    // starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
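            // The speculated add has already executed and clobbered dest. Undo it
            // by subtracting src back out, then rebox the result as an int32
            // JSValue by or-ing the number tag back in.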
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
            break;

        case BooleanSpeculationCheck:
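            // Undo the xor with ValueFalse that the speculation check performed;
            // xor-ing with ValueFalse again restores the original boolean JSValue.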
            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }

    // Refine some array and/or value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);

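                // Preserve the scratch registers across the profiling code. On
                // ARM64, pushToSave()/popToRestore() are used because the stack
                // pointer must stay 16-byte aligned there.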
                if (isARM64()) {
                    m_jit.pushToSave(scratch1);
                    m_jit.pushToSave(scratch2);
                } else {
                    m_jit.push(scratch1);
                    m_jit.push(scratch2);
                }

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.gpr();

                m_jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeOffset()), scratch1);
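                // Compute (1 << indexingType) and merge it into the profile's
                // ArrayModes bitfield, recording the indexing type just observed.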
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

                if (isARM64()) {
                    m_jit.popToRestore(scratch2);
                    m_jit.popToRestore(scratch1);
                } else {
                    m_jit.pop(scratch2);
                    m_jit.pop(scratch1);
                }
            }
        }

        if (!!exit.m_valueProfile) {
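            // Record the offending value in the profile's speculation-failure
            // bucket, so the next compilation can start from a better prediction.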
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

            if (exit.m_jsValueSource.isAddress()) {
                // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
                // since we know how to restore it.
                m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
                m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
                m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
            } else
                m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
        }
    }

    // What follows is an intentionally simple OSR exit implementation that generates
    // fairly poor code but is very easy to hack. In particular, it dumps all state that
    // needs conversion into a scratch buffer so that later, when we actually do the
    // conversions, we know that all temp registers are free to use and the variable is
    // definitely in a well-known spot in the scratch buffer regardless of whether it had
    // originally been in a register or spilled. This allows us to decouple "where was
    // the variable" from "how was it represented". Consider the
    // Int32DisplacedInJSStack recovery: it tells us that the value is in a
    // particular place and that that place holds an unboxed int32. We have two different
    // places that a value could be (displaced, register) and a bunch of different
    // ways of representing a value. The number of recoveries is two * a bunch. The code
    // below means that we have to have two + a bunch cases rather than two * a bunch.
    // Once we have loaded the value from wherever it was, the reboxing is the same
    // regardless of its location. Likewise, before we do the reboxing, the way we get to
    // the value (i.e. where we load it from) is the same regardless of its type. Because
    // the code below always dumps everything into a scratch buffer first, the two
    // questions become orthogonal, which simplifies adding new types and adding new
    // locations.
    //
    // This raises the question: does using such a suboptimal implementation of OSR exit,
    // where we always emit code to dump all state into a scratch buffer only to then
    // dump it right back into the stack, hurt us in any way? The answer is that OSR exits
    // are rare. Our tiering strategy ensures this. This is because if an OSR exit is
    // taken more than ~100 times, we jettison the DFG code block along with all of its
    // exits. It is impossible for an OSR exit - i.e. the code we compile below - to
    // execute frequently enough for the codegen to matter that much. It probably matters
    // enough that we don't want to turn this into some super-slow function call, but so
    // long as we're generating straight-line code, that code can be pretty bad. Also
    // because we tend to exit only along one OSR exit from any DFG code block - that's an
    // empirical result that we're extremely confident about - the code size of this
    // doesn't matter much. Hence any attempt to optimize the codegen here is just purely
    // harmful to the system: it probably won't reduce either net memory usage or net
    // execution time. It will only prevent us from cleanly decoupling "where was the
    // variable" from "how was it represented", which will make it more difficult to add
    // features in the future and it will make it harder to reason about bugs.

    // Save all state from GPRs into the scratch buffer.

    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UnboxedInt52InGPR:
        case UnboxedStrictInt52InGPR:
        case UnboxedCellInGPR:
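            // Regardless of format, dump the raw 64-bit register contents; the
            // conversion loop below reinterprets and reboxes them as needed.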
            m_jit.store64(recovery.gpr(), scratch + index);
            break;

        default:
            break;
        }
    }

    // And voila, all GPRs are free to reuse.

    // Save all state from FPRs into the scratch buffer.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case InFPR:
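            // Materialize the address of this operand's scratch slot, then store
            // the double into it.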
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
            break;

        default:
            break;
        }
    }

    // Now, all FPRs are also free.

    // Save all state from the stack into the scratch buffer. For simplicity we
    // do this even for state that's already in the right place on the stack.
    // It makes things simpler later.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case Int52DisplacedInJSStack:
        case StrictInt52DisplacedInJSStack:
            m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, scratch + index);
            break;

        default:
            break;
        }
    }

    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
    // could toast some stack that the DFG used. We need to do it before storing to stack offsets
    // used by baseline.
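    // That is: sp = fp - requiredRegisterCountForExit * sizeof(Register).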
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);

    // Do all data format conversions and store the results into the stack.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        switch (recovery.technique()) {
        case InGPR:
        case UnboxedCellInGPR:
        case DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
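            // Rebox the unboxed int32: zero-extend to clear the upper 32 bits,
            // then or the number tag back in.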
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.zeroExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0);
            m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedInt52InGPR:
        case Int52DisplacedInJSStack:
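            // The Int52 format keeps the value left-shifted by int52ShiftAmount.
            // Shift it back down to a strict int52, then box it as a JSValue
            // (falling back to a boxed double if it doesn't fit in an int32).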
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.rshift64(
                AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
            m_jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedStrictInt52InGPR:
        case StrictInt52DisplacedInJSStack:
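            // Strict Int52 is already in unshifted form, so it can be boxed directly.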
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;

        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
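            // Canonicalize any impure NaN: a NaN with arbitrary payload bits could
            // alias the tagged JSValue encoding, so purify it before boxing.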
            m_jit.purifyNaN(FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;

        case Constant:
            m_jit.store64(
                AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())),
                AssemblyHelpers::addressFor(operand));
            break;

        case DirectArgumentsThatWereNotCreated:
        case ClonedArgumentsThatWereNotCreated:
            // Don't do this, yet.
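            // (These get materialized by emitRestoreArguments() below, once the
            // rest of the stack has been recovered.)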
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
    // recoveries don't recursively refer to each other. But, we don't try to assume that they only
    // refer to certain ranges of locals. That is why we need to do this here, once the stack is sensible.
    // Note that we also roughly assume that the arguments might still be materialized outside of their
    // inline call frame scope - but for now the DFG wouldn't do that.

    emitRestoreArguments(operands);

    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    // that all new calls into this code will go to the new JIT, so the execute
    // counter only affects call frames that performed OSR exit and call frames
    // that were still executing the old JIT at the time of another call frame's
    // OSR exit. We want to ensure that the following is true:
    //
    // (a) Code that performs an OSR exit gets a chance to reenter optimized
    //     code eventually, since optimized code is faster. But we don't
    //     want to do such reentry too aggressively (see (c) below).
    //
    // (b) If there is code on the call stack that is still running the old
    //     JIT's code and has never OSR'd, then it should get a chance to
    //     perform OSR entry despite the fact that we've exited.
    //
    // (c) Code that performs an OSR exit should not immediately retry OSR
    //     entry, since both forms of OSR are expensive. OSR entry is
    //     particularly expensive.
    //
    // (d) Frequent OSR failures, even those that do not result in the code
    //     running in a hot loop, result in recompilation getting triggered.
    //
    // To ensure (c), we'd like to set the execute counter to
    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    // (a) and (b), since then every OSR exit would delay the opportunity for
    // every call frame to perform OSR entry. Essentially, if OSR exit happens
    // frequently and the function has few loops, then the counter will never
    // become non-negative and OSR entry will never be triggered. OSR entry
    // will only happen if a loop gets hot in the old JIT, which does a pretty
    // good job of ensuring (a) and (b). But that doesn't take care of (d),
    // since each speculation failure would reset the execute counter.
    // So we check here if the number of speculation failures is significantly
    // larger than the number of successes (we want a 90% success rate), and if
    // there have been a large enough number of failures. If so, we set the
    // counter to 0; otherwise we set the counter to
    // counterValueForOptimizeAfterWarmUp().

    handleExitCounts(m_jit, exit);

    // Reify inlined call frames.

    reifyInlinedCallFrames(m_jit, exit);

    // And finish.
    adjustAndJumpToTarget(m_jit, exit);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)