/*
 * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGSpeculativeJIT.h"
#include "JSCInlines.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit.
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
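    // In other words, the addPtr above repositions the stack pointer to
    // framePointer - requiredRegisterCountForExit * sizeof(Register), reserving the
    // worst-case amount of stack the exit code below may touch.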

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            // The speculative add already clobbered the destination register; undo it
            // so the recovered value is the original operand.
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or ops that
            // weren't known to be array accesses in the bytecode. The latter case is a
            // FIXME, while the former case is an outcome of a CheckStructure not
            // knowing why it was emitted (could be either due to an inline cache of a
            // property access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
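                // The move/lshift32/or32 sequence above computes (1 << indexingType)
                // and ORs that bit into the profile's ArrayModes word, i.e. it records
                // "this indexing type was seen here" in the baseline array profile.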

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
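            // On JSVALUE32_64 a JSValue is a 32-bit tag word plus a 32-bit payload
            // word, so the profile bucket below is filled in as two separate 32-bit
            // stores rather than a single 64-bit store.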

            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());

#if CPU(ARM64)
                m_jit.pushToSave(scratch);
#else
                m_jit.push(scratch);
#endif

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);

#if CPU(ARM64)
                m_jit.popToRestore(scratch);
#else
                m_jit.pop(scratch);
#endif
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }

    // Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how
    // and why we do this simple approach.

    // 4) Save all state from GPRs into the scratch buffer.

    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
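    // The scratch buffer holds one 8-byte EncodedJSValue slot per operand, indexed
    // the same way as the operands list; each slot's asBits.tag / asBits.payload
    // halves are written separately by the loops below.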

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
        case UnboxedCellInGPR:
            m_jit.store32(
                recovery.gpr(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        case InPair:
            m_jit.store32(
                recovery.tagGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                recovery.payloadGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        default:
            break;
        }
    }

    // Now all GPRs are free to reuse.

    // 5) Save all state from FPRs into the scratch buffer.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
            break;

        default:
            break;
        }
    }

    // Now all FPRs are free to reuse.

    // 6) Save all state from the stack into the scratch buffer. For simplicity we
    //    do this even for state that's already in the right place on the stack.
    //    It makes things simpler later.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                AssemblyHelpers::tagFor(recovery.virtualRegister()),
                GPRInfo::regT0);
            m_jit.load32(
                AssemblyHelpers::payloadFor(recovery.virtualRegister()),
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                GPRInfo::regT1,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        default:
            break;
        }
    }

    // 7) Do all data format conversions and store the results into the stack.
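    //    Each operand's stack slot receives a complete tag/payload pair here. For
    //    example, a value recovered in UnboxedInt32InGPR format was saved above as a
    //    bare 32-bit payload; below it is written back as Int32Tag plus that payload.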

    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        switch (recovery.technique()) {
        case InPair:
        case DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
                GPRInfo::regT0);
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT1,
                AssemblyHelpers::payloadFor(operand));
            break;

        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
            // purifyNaN canonicalizes any impure NaN so that the bits written to the
            // stack cannot be misread as a tagged value.
            m_jit.purifyNaN(FPRInfo::fpRegT0);
            m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case UnboxedBooleanInGPR:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case Constant:
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;

        default:
            break;
        }
    }

    // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //    that all new calls into this code will go to the new JIT, so the execute
    //    counter only affects call frames that performed OSR exit and call frames
    //    that were still executing the old JIT at the time of another call frame's
    //    OSR exit. We want to ensure that the following is true:
    //
    //    (a) Code that performs an OSR exit gets a chance to reenter optimized
    //        code eventually, since optimized code is faster. But we don't
    //        want to do such reentry too aggressively (see (c) below).
    //
    //    (b) If there is code on the call stack that is still running the old
    //        JIT's code and has never OSR'd, then it should get a chance to
    //        perform OSR entry despite the fact that we've exited.
    //
    //    (c) Code that performs an OSR exit should not immediately retry OSR
    //        entry, since both forms of OSR are expensive. OSR entry is
    //        particularly expensive.
    //
    //    (d) Frequent OSR failures, even those that do not result in the code
    //        running in a hot loop, result in recompilation getting triggered.
    //
    //    To ensure (c), we'd like to set the execute counter to
    //    counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //    (a) and (b), since then every OSR exit would delay the opportunity for
    //    every call frame to perform OSR entry. Essentially, if OSR exit happens
    //    frequently and the function has few loops, then the counter will never
    //    become non-negative and OSR entry will never be triggered. OSR entry
    //    will only happen if a loop gets hot in the old JIT, which does a pretty
    //    good job of ensuring (a) and (b). But that doesn't take care of (d),
    //    since each speculation failure would reset the execute counter.
    //    So we check here if the number of speculation failures is significantly
    //    larger than the number of successes (we want a 90% success rate), and if
    //    there have been a large enough number of failures. If so, we set the
    //    counter to 0; otherwise we set the counter to
    //    counterValueForOptimizeAfterWarmUp().
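    //
    //    As a rough sketch of that policy (the real check is emitted by
    //    handleExitCounts() and uses the CodeBlock's actual counters; the names
    //    below are illustrative only):
    //
    //        if (exitCount > largeEnoughFailureCount && exitCount * 9 > successCount)
    //            executeCounter = 0;                                    // recompile soon
    //        else
    //            executeCounter = counterValueForOptimizeAfterWarmUp(); // back off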

    handleExitCounts(m_jit, exit);

    // 9) Reify inlined call frames.

    reifyInlinedCallFrames(m_jit, exit);
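    // Roughly speaking, this materializes a real call frame on the stack for each
    // frame the DFG had inlined, so the baseline code we are exiting to sees
    // ordinary, un-inlined call frames.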

    // 10) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

    if (haveArguments) {
        ArgumentsRecoveryGenerator argumentsRecovery;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            argumentsRecovery.generateFor(
                operands.operandForIndex(index), exit.m_codeOrigin, m_jit);
        }
    }

    adjustAndJumpToTarget(m_jit, exit);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)