/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGSpeculativeJIT.h"
#include "JSCInlines.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
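    // This runs in phases: optional debug logging and speculation recovery,
    // profile refinement, saving live state (GPRs, FPRs, then the stack) into
    // a scratch buffer, rewriting that state onto the stack in baseline
    // format, and finally reifying inline call frames and jumping to the
    // baseline target.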
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }
    // Perform speculation recovery. This only comes into play when an operation
    // starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }
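    // SpeculativeAdd undoes the "dest += src" that speculatively compiled
    // code performed before its overflow check failed; the empty
    // BooleanSpeculationCheck case needs no compensation on this tier.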
    // Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
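                // The plan: pick and spill two scratch GPRs, then write the
                // observed structure and its indexing type into the baseline
                // CodeBlock's array profile.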
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }
                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

                m_jit.push(scratch1);
                m_jit.push(scratch2);
                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                // arrayModes |= 1 << indexingType
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
            }
        }
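        // If the exit site has a value profile, also record the exiting
        // value's tag and payload in its spec-fail bucket so future
        // compilations see the value we actually encountered.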
        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());

                m_jit.push(scratch);

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);

                m_jit.pop(scratch);
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }
    // Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how and why we
    // do this simple approach.

    // Save all state from GPRs into the scratch buffer.

    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
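    // Each operand owns one EncodedJSValue slot in the scratch buffer; on
    // 32-bit we address a slot's tag and payload words separately through
    // EncodedValueDescriptor's asBits view.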
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
        case UnboxedCellInGPR:
            m_jit.store32(
                recovery.gpr(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        case InPair:
            m_jit.store32(
                recovery.tagGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                recovery.payloadGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        default:
            break;
        }
    }
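    // For the unboxed cases only the payload word is saved; the matching tag
    // (Int32Tag, CellTag, or BooleanTag) is synthesized later when the value
    // is written back to the stack.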
    // Now all GPRs are free to reuse.

    // Save all state from FPRs into the scratch buffer.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
            break;

        default:
            break;
        }
    }
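    // Clobbering GPRInfo::regT0 as an address register here is safe: the
    // previous loop already captured all GPR state.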
    // Now all FPRs are free to reuse.

    // Save all state from the stack into the scratch buffer. For simplicity we
    // do this even for state that's already in the right place on the stack.
    // It makes things simpler later.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                AssemblyHelpers::tagFor(recovery.virtualRegister()),
                GPRInfo::regT0);
            m_jit.load32(
                AssemblyHelpers::payloadFor(recovery.virtualRegister()),
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                GPRInfo::regT1,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        default:
            break;
        }
    }
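    // A Displaced recovery means the value lives in a different stack slot
    // (recovery.virtualRegister()) than the one baseline expects; staging
    // everything through the scratch buffer makes arbitrary slot permutations
    // safe.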
    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
    // could toast some stack that the DFG used. We need to do it before storing to stack offsets
    // used by baseline.
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
    // Do all data format conversions and store the results into the stack.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        switch (recovery.technique()) {
        case InPair:
        case DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
                GPRInfo::regT0);
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT1,
                AssemblyHelpers::payloadFor(operand));
            break;
        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
            // Canonicalize NaNs: an impure NaN's high word could collide with
            // a tag on JSVALUE32_64.
            m_jit.purifyNaN(FPRInfo::fpRegT0);
            m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
            break;
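        // For the remaining unboxed cases we rebuild the full JSValue by
        // pairing the saved payload with a constant tag.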
        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
        case UnboxedBooleanInGPR:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
        case Constant:
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;

        case DirectArgumentsThatWereNotCreated:
        case ClonedArgumentsThatWereNotCreated:
            // Don't do this, yet.
            break;

        default:
            break;
        }
    }
    // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
    // recoveries don't recursively refer to each other. But, we don't try to assume that they only
    // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible.
    // Note that we also roughly assume that the arguments might still be materialized outside of their
    // inline call frame scope - but for now the DFG wouldn't do that.

    emitRestoreArguments(operands);
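    // This materializes any DirectArguments/ClonedArguments objects whose
    // creation was deferred (the ...ThatWereNotCreated recoveries skipped
    // above).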
    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    // that all new calls into this code will go to the new JIT, so the execute
    // counter only affects call frames that performed OSR exit and call frames
    // that were still executing the old JIT at the time of another call frame's
    // OSR exit. We want to ensure that the following is true:
    //
    // (a) Code that performs an OSR exit gets a chance to reenter optimized
    //     code eventually, since optimized code is faster. But we don't
    //     want to do such reentry too aggressively (see (c) below).
    //
    // (b) If there is code on the call stack that is still running the old
    //     JIT's code and has never OSR'd, then it should get a chance to
    //     perform OSR entry despite the fact that we've exited.
    //
    // (c) Code that performs an OSR exit should not immediately retry OSR
    //     entry, since both forms of OSR are expensive. OSR entry is
    //     particularly expensive.
    //
    // (d) Frequent OSR failures, even those that do not result in the code
    //     running in a hot loop, result in recompilation getting triggered.
    //
    // To ensure (c), we'd like to set the execute counter to
    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    // (a) and (b), since then every OSR exit would delay the opportunity for
    // every call frame to perform OSR entry. Essentially, if OSR exit happens
    // frequently and the function has few loops, then the counter will never
    // become non-negative and OSR entry will never be triggered. OSR entry
    // will only happen if a loop gets hot in the old JIT, which does a pretty
    // good job of ensuring (a) and (b). But that doesn't take care of (d),
    // since each speculation failure would reset the execute counter.
    // So we check here if the number of speculation failures is significantly
    // larger than the number of successes (we want 90% success rate), and if
    // there have been a large enough number of failures. If so, we set the
    // counter to 0; otherwise we set the counter to
    // counterValueForOptimizeAfterWarmUp().

    handleExitCounts(m_jit, exit);
    // Reify inlined call frames.

    reifyInlinedCallFrames(m_jit, exit);

    // And finish.
    adjustAndJumpToTarget(m_jit, exit);
}

} } // namespace JSC::DFG
391 #endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)