/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLog("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLog(") at JIT offset 0x%x ", m_jit.debugOffset());
    exit.dump(WTF::dataFile());
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_jit.codeBlock();
    debugInfo->nodeIndex = exit.m_nodeIndex;
    
    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif
    
#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif
    
#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif
    
    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;
            
        case BooleanSpeculationCheck:
            break;
            
        default:
            break;
        }
    }
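    
    // Note: in the SpeculativeAdd case the speculative integer add has already
    // clobbered its destination register by the time the speculation check fails,
    // so the sub32 above restores the destination's original value before we
    // rebuild the baseline frame.
    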
    // 3) Refine some value profile, if appropriate.
    
    if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
        EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
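        
        // The spec-fail bucket records the value that failed speculation so that
        // future compiles can take it into account. On JSVALUE32_64 an
        // EncodedJSValue is a pair of 32-bit words (asBits.tag and asBits.payload),
        // which is why the value is copied into the bucket one word at a time below.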
        if (exit.m_jsValueSource.isAddress()) {
            // Save a register so we can use it.
            GPRReg scratch = GPRInfo::regT0;
            if (scratch == exit.m_jsValueSource.base())
                scratch = GPRInfo::regT1;
            ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t));
            EncodedJSValue* scratchDataBuffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
            m_jit.store32(scratch, scratchDataBuffer);
            m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
            m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
            m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
            m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            m_jit.load32(scratchDataBuffer, scratch);
        } else if (exit.m_jsValueSource.hasKnownTag()) {
            m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
            m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
        } else {
            m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
            m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
        }
    }
    
    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.
    
    Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;
    
    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;
    
    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32InRegisterFile = false;
    bool haveUnboxedCellInRegisterFile = false;
    bool haveUnboxedBooleanInRegisterFile = false;
    bool haveUInt32s = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    
    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        switch (recovery.technique()) {
        case DisplacedInRegisterFile:
        case Int32DisplacedInRegisterFile:
        case CellDisplacedInRegisterFile:
        case BooleanDisplacedInRegisterFile:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);
            
            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
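            //
            // For example, if this displaced recovery still needs to read the
            // current contents of some register file slot, but the exit also wants
            // to dump a register-held value into that same slot, then the slot is
            // poisoned: the register value is parked in the scratch buffer and only
            // written out in step 9, after every displaced slot has been read.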
            if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
                switch (exit.m_variables[recovery.virtualRegister()].technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UnboxedBooleanInGPR:
                case UInt32InGPR:
                case InPair:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;
            
        case UInt32InGPR:
            haveUInt32s = true;
            break;
            
        case AlreadyInRegisterFileAsUnboxedInt32:
            haveUnboxedInt32InRegisterFile = true;
            break;
            
        case AlreadyInRegisterFileAsUnboxedCell:
            haveUnboxedCellInRegisterFile = true;
            break;
            
        case AlreadyInRegisterFileAsUnboxedBoolean:
            haveUnboxedBooleanInRegisterFile = true;
            break;
            
        case InFPR:
            haveFPRs = true;
            break;
            
        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;
            
        default:
            break;
        }
    }
    
    unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
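    // For example, with 3 poisoned virtual registers and 10 displaced virtual
    // registers on a target where 10 * 2 > GPRInfo::numberOfRegisters, this
    // reserves 3 + 10 = 13 slots; if the displaced values can instead all be held
    // in payload/tag register pairs, only the 3 poisoned slots are needed.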
    ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    
    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.
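    // (That is, values are loaded or computed as early as possible and stored as
    // late as possible, presumably so the processor can overlap the work.)
    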
    // 5) Perform all reboxing of integers and cells, except for those in registers.
    
    if (haveUnboxedInt32InRegisterFile || haveUnboxedCellInRegisterFile || haveUnboxedBooleanInRegisterFile) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            switch (recovery.technique()) {
            case AlreadyInRegisterFileAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;
                
            case AlreadyInRegisterFileAsUnboxedCell:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;
                
            case AlreadyInRegisterFileAsUnboxedBoolean:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;
                
            default:
                break;
            }
        }
    }
    
    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.
    
    initializePoisoned(exit.m_variables.size());
    unsigned currentPoisonIndex = 0;
    
    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        int operand = exit.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
            
        case InPair:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.payloadGPR(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
            
        case UInt32InGPR: {
            EncodedJSValue* myScratch = scratchDataBuffer + scratchBufferLengthBeforeUInt32s;
            
            GPRReg addressGPR = GPRInfo::regT0;
            if (addressGPR == recovery.gpr())
                addressGPR = GPRInfo::regT1;
            
            m_jit.storePtr(addressGPR, myScratch);
            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
            
            AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));
            
            m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
            m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
            } else
                m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor((VirtualRegister)operand));
            
            AssemblyHelpers::Jump done = m_jit.jump();
            
            positive.link(&m_jit);
            
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
            } else {
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
            }
            
            done.link(&m_jit);
            
            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
            m_jit.loadPtr(myScratch, addressGPR);
            
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            }
            break;
        }
            
        default:
            break;
        }
    }
    
    // 7) Dump all doubles into the register file, or to the scratch storage if the
    //    destination virtual register is poisoned.
    if (haveFPRs) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }
    
    // At this point all GPRs are available for scratch use.
    
    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);
    
    // 8) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.
    
    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.
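            //
            // The shuffle is done in two passes: first every displaced payload (and
            // its tag, real or implied) is loaded into a distinct GPR, then every
            // GPR is stored to its destination. Because no store happens until all
            // sources have been read, a destination slot can never clobber a source.
            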
            unsigned displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;
                case Int32DisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case CellDisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case BooleanDisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                default:
                    break;
                }
            }
            
            displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                case Int32DisplacedInRegisterFile:
                case CellDisplacedInRegisterFile:
                case BooleanDisplacedInRegisterFile:
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.
            
            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            //     r1 -> r0
            //     r0 -> r1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.
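            //
            // This costs an extra load/store pair per displaced value, but it is
            // correct for any dependency pattern between source and destination
            // slots without having to compute an explicit shuffle order.
            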
            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                    scratchIndex++;
                    break;
                case Int32DisplacedInRegisterFile:
                case CellDisplacedInRegisterFile:
                case BooleanDisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    break;
                default:
                    break;
                }
            }
            
            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    scratchIndex++;
                    break;
                case Int32DisplacedInRegisterFile:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                case CellDisplacedInRegisterFile:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                case BooleanDisplacedInRegisterFile:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }
            
            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }
    
    // 9) Dump all poisoned virtual registers.
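    //    By now every displaced slot has been read, so it is finally safe to flush
    //    the register and FPR values that were parked in the scratch buffer
    //    (indexed via poisonIndex()) into their register file slots.
    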
    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;
            
            const ValueRecovery& recovery = exit.m_variables[virtualRegister];
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UnboxedBooleanInGPR: {
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;
            }
                
            case InFPR:
            case InPair:
            case UInt32InGPR:
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;
                
            default:
                break;
            }
        }
    }
    
    // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     frequently.
    
    if (haveConstants) {
        if (haveUndefined) {
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
        }
        
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != Constant)
                continue;
            
            if (recovery.constant().isUndefined()) {
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
            } else {
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
            }
        }
    }
    
    // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    // (a) Code that performs an OSR exit gets a chance to reenter optimized
    //     code eventually, since optimized code is faster. But we don't
    //     want to do such reentry too aggressively (see (c) below).
    //
    // (b) If there is code on the call stack that is still running the old
    //     JIT's code and has never OSR'd, then it should get a chance to
    //     perform OSR entry despite the fact that we've exited.
    //
    // (c) Code that performs an OSR exit should not immediately retry OSR
    //     entry, since both forms of OSR are expensive. OSR entry is
    //     particularly expensive.
    //
    // (d) Frequent OSR failures, even those that do not result in the code
    //     running in a hot loop, result in recompilation getting triggered.
    //
    // To ensure (c), we'd like to set the execute counter to
    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    // (a) and (b), since then every OSR exit would delay the opportunity for
    // every call frame to perform OSR entry. Essentially, if OSR exit happens
    // frequently and the function has few loops, then the counter will never
    // become non-negative and OSR entry will never be triggered. OSR entry
    // will only happen if a loop gets hot in the old JIT, which does a pretty
    // good job of ensuring (a) and (b). But that doesn't take care of (d),
    // since each speculation failure would reset the execute counter.
    // So we check here if the number of speculation failures is significantly
    // larger than the number of successes (we want a 90% success rate), and if
    // there have been a large enough number of failures. If so, we set the
    // counter to 0; otherwise we set the counter to
    // counterValueForOptimizeAfterWarmUp().
    
    handleExitCounts(exit);
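    // handleExitCounts() emits the counter and threshold updates that implement the
    // policy described in the comment above.
    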
    // 12) Load the result of the last bytecode operation into regT0.
    
    if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
        m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
        m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
    }
    
    // 13) Fix call frame(s).
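    //     Every inline call frame between the exit's code origin and the machine
    //     call frame is materialized as a real call frame that the baseline JIT can
    //     walk: its CodeBlock, ScopeChain, CallerFrame, ReturnPC, ArgumentCount and
    //     Callee slots are all filled in below.
    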
    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));
    
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);
        
        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);
        
        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
        
        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.add32(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
        
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
    }
    
    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
    
    // 14) Jump into the corresponding baseline JIT code.
    
    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);
    
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);
    
    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
    
    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
    
    ASSERT(GPRInfo::regT2 != GPRInfo::cachedResultRegister && GPRInfo::regT2 != GPRInfo::cachedResultRegister2);
    
    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    m_jit.jump(GPRInfo::regT2);
    
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog(" -> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)