/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGSpeculativeJIT.h"
#include "JSCInlines.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

    // Perform speculation recovery. This only comes into play when an operation
    // starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
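            // The speculatively executed add has already modified the destination
            // register, so undo it by subtracting the source operand back out.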
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }

    // Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

                m_jit.push(scratch1);
                m_jit.push(scratch2);

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

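                // Record the value's structure as the last-seen structure in the array
                // profile, and set the bit for its indexing type in the profile's
                // array-modes mask (the mask is a bitfield of 1 << indexingType).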
                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

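            // Write the exiting value's tag and payload into the profile's spec-fail
            // bucket so the baseline code's value profiling sees what we saw here.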
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());

                m_jit.push(scratch);

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);

                m_jit.pop(scratch);
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }

    // Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how and why we
    // use this simple approach.

    // Save all state from GPRs into the scratch buffer.

    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
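        // For unboxed values only the payload needs to be spilled; the recovery
        // technique itself tells us which tag to reconstruct later. Full JSValues
        // held in a register pair spill both halves.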
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
        case UnboxedCellInGPR:
            m_jit.store32(
                recovery.gpr(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        case InPair:
            m_jit.store32(
                recovery.tagGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                recovery.payloadGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        default:
            break;
        }
    }

    // Now all GPRs are free to reuse.

    // Save all state from FPRs into the scratch buffer.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case InFPR:
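            // The GPRs were already saved above, so regT0 is free to hold a pointer
            // into the scratch buffer while we spill the double out of its FPR.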
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
            break;

        default:
            break;
        }
    }

    // Now all FPRs are free to reuse.

    // Save all state from the stack into the scratch buffer. For simplicity we
    // do this even for state that's already in the right place on the stack.
    // It makes things simpler later.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                AssemblyHelpers::tagFor(recovery.virtualRegister()),
                GPRInfo::regT0);
            m_jit.load32(
                AssemblyHelpers::payloadFor(recovery.virtualRegister()),
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                GPRInfo::regT1,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        default:
            break;
        }
    }

    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
    // could toast some stack that the DFG used. We need to do it before storing to stack offsets
    // used by baseline.
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);

    // Do all data format conversions and store the results into the stack.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        switch (recovery.technique()) {
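        // Each value is reloaded from the scratch buffer (or synthesized directly
        // from a constant) and stored into its baseline stack slot with the tag
        // that the recovery technique implies.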
        case InPair:
        case DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
                GPRInfo::regT0);
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT1,
                AssemblyHelpers::payloadFor(operand));
            break;

        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
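            // Purify any impure NaN so the double's high word cannot be mistaken
            // for a JSValue tag when the slot is later read as a tagged value.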
            m_jit.purifyNaN(FPRInfo::fpRegT0);
            m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case UnboxedBooleanInGPR:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case Constant:
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;

        case DirectArgumentsThatWereNotCreated:
        case ClonedArgumentsThatWereNotCreated:
            // Don't do this, yet.
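            // These are handled by emitRestoreArguments() below, once the rest of
            // the stack has been written back.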
            break;

        default:
            break;
        }
    }

    // Now that things on the stack are recovered, do the arguments recovery. We assume that argument
    // recoveries don't recursively refer to each other. But we don't try to assume that they only
    // refer to certain ranges of locals, which is why we need to do this here, once the stack is sensible.
    // Note that we also roughly assume that the arguments might still be materialized outside of their
    // inline call frame scope - but for now the DFG wouldn't do that.

    emitRestoreArguments(operands);

    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    // that all new calls into this code will go to the new JIT, so the execute
    // counter only affects call frames that performed OSR exit and call frames
    // that were still executing the old JIT at the time of another call frame's
    // OSR exit. We want to ensure that the following is true:
    //
    // (a) Code that performs an OSR exit gets a chance to reenter optimized
    //     code eventually, since optimized code is faster. But we don't
    //     want to do such reentry too aggressively (see (c) below).
    //
    // (b) If there is code on the call stack that is still running the old
    //     JIT's code and has never OSR'd, then it should get a chance to
    //     perform OSR entry despite the fact that we've exited.
    //
    // (c) Code that performs an OSR exit should not immediately retry OSR
    //     entry, since both forms of OSR are expensive. OSR entry is
    //     particularly expensive.
    //
    // (d) Frequent OSR failures, even those that do not result in the code
    //     running in a hot loop, result in recompilation getting triggered.
    //
    // To ensure (c), we'd like to set the execute counter to
    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    // (a) and (b), since then every OSR exit would delay the opportunity for
    // every call frame to perform OSR entry. Essentially, if OSR exit happens
    // frequently and the function has few loops, then the counter will never
    // become non-negative and OSR entry will never be triggered. OSR entry
    // will only happen if a loop gets hot in the old JIT, which does a pretty
    // good job of ensuring (a) and (b). But that doesn't take care of (d),
    // since each speculation failure would reset the execute counter.
    // So we check here if the number of speculation failures is significantly
    // larger than the number of successes (we want 90% success rate), and if
    // there have been a large enough number of failures. If so, we set the
    // counter to 0; otherwise we set the counter to
    // counterValueForOptimizeAfterWarmUp().

    handleExitCounts(m_jit, exit);

    // Reify inlined call frames.

    reifyInlinedCallFrames(m_jit, exit);

    // And finish.
    adjustAndJumpToTarget(m_jit, exit);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)