/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
#include "Operations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
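    // On JSVALUE32_64, every JSValue in the stack is stored as a separate 32-bit tag and
    // 32-bit payload, so most recoveries below are emitted as paired store32s through
    // AssemblyHelpers::tagFor() / payloadFor(). The numbered steps walk from profiling the
    // failure, through rewriting the DFG's register and stack state into baseline form, to
    // the final jump into the baseline JIT.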
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(") at JIT offset 0x%x ", m_jit.debugOffset());
    dumpOperands(operands, WTF::dataFile());
#endif

    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.
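    // The value that failed speculation is described by exit.m_jsValueSource: it may live in a
    // tag/payload register pair, in a payload register with a statically known tag, or at an
    // address. The profiling below has to handle each of those shapes.
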
    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
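
                // The lshift32/or32 above record the failing object's indexing type in the array
                // profile as a one-bit mask (1 << indexingType) OR'd into addressOfArrayModes(),
                // and the storePtr records its Structure, so the next compile can see why we exited.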

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
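            // Write the failing value's tag and payload into the first spec-fail bucket so that
            // the value profile that drove this speculation records what we actually saw here.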

            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());

#if CPU(ARM64)
                m_jit.pushToSave(scratch);
#else
                m_jit.push(scratch);
#endif

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);

#if CPU(ARM64)
                m_jit.popToRestore(scratch);
#else
                m_jit.pop(scratch);
#endif
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32InJSStack = false;
    bool haveUnboxedCellInJSStack = false;
    bool haveUnboxedBooleanInJSStack = false;
    bool haveUInt32s = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
                switch (operands.local(recovery.virtualRegister()).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UnboxedBooleanInGPR:
                case InPair:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32InJSStack = true;
            break;

        case AlreadyInJSStackAsUnboxedCell:
            haveUnboxedCellInJSStack = true;
            break;

        case AlreadyInJSStackAsUnboxedBoolean:
            haveUnboxedBooleanInJSStack = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;

        default:
            break;
        }
    }

    unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers and cells, except for those in registers.

    if (haveUnboxedInt32InJSStack || haveUnboxedCellInJSStack || haveUnboxedBooleanInJSStack) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case AlreadyInJSStackAsUnboxedCell:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case AlreadyInJSStackAsUnboxedBoolean:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;

        case InPair:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.payloadGPR(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
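
        // The UInt32InGPR case below reboxes an unsigned 32-bit value. If the value is >= 0 when
        // read as a signed int it fits in an Int32 and is stored with Int32Tag; otherwise it is
        // converted to a double by adding 2^32 (AssemblyHelpers::twoToThe32) after
        // convertInt32ToDouble. fpRegT0 and a GPR are spilled to the scratch buffer around the
        // conversion so that no live value is clobbered.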
        case UInt32InGPR: {
            EncodedJSValue* myScratch = scratchDataBuffer + scratchBufferLengthBeforeUInt32s;

            GPRReg addressGPR = GPRInfo::regT0;
            if (addressGPR == recovery.gpr())
                addressGPR = GPRInfo::regT1;

            m_jit.storePtr(addressGPR, myScratch);
            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

            AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

            m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
            m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
            } else
                m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor((VirtualRegister)operand));

            AssemblyHelpers::Jump done = m_jit.jump();

            positive.link(&m_jit);

            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
            } else {
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
            }

            done.link(&m_jit);

            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
            m_jit.loadPtr(myScratch, addressGPR);

            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            }
            break;
        }

        default:
            break;
        }
    }

    // 7) Dump all doubles into the stack, or to the scratch storage if the
    //    destination virtual register is poisoned.
    if (haveFPRs) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // At this point all GPRs are available for scratch use.

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 8) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;

                case Int32DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++));
                    break;

                case CellDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++));
                    break;

                case BooleanDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++));
                    break;

                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case CellDisplacedInJSStack:
                case BooleanDisplacedInJSStack:
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
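            //
            //     (illustration only; hypothetical recoveries, not the original example)
            //     local(0) <- DisplacedInJSStack(local(1))
            //     local(1) <- DisplacedInJSStack(local(0))
            //
            // where neither destination can be written until both sources have been read.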
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                    scratchIndex++;
                    break;

                case Int32DisplacedInJSStack:
                case CellDisplacedInJSStack:
                case BooleanDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    break;

                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    scratchIndex++;
                    break;

                case Int32DisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                case CellDisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                case BooleanDisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }
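
    // Everything that was parked in the scratch buffer because its destination slot was
    // poisoned can now be written out: the displaced recoveries that still needed to read
    // those stack slots have been handled above.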

    // 9) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = operands.local(virtualRegister);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UnboxedBooleanInGPR: {
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;
            }

            case InFPR:
            case InPair:
            case UInt32InGPR:
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     a lot.

    if (haveConstants) {
        if (haveUndefined) {
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
        }

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined()) {
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
            } else {
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
            }
        }
    }

    // 12) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want a 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

    handleExitCounts(exit);

    // 13) Reify inlined call frames.

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));
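
    // Walk from the exit's code origin out toward the machine call frame, materializing the
    // call frame header slots (CodeBlock, ScopeChain, CallerFrame, ReturnPC, ArgumentCount,
    // Callee) that each inlined frame would have had if the baseline JIT had made the call.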
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.add32(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }

    // 14) Create arguments if necessary and place them into the appropriate aliased
    //     registers.
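    //     Each inline call frame only needs its arguments object created once, so
    //     didCreateArgumentsObject below remembers which frames have already had
    //     operationCreateInlinedArguments / operationCreateArguments called for them; later
    //     operands that alias the same frame just reload the pointer from the arguments register.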

    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.setupArgumentsWithExecState(
                        AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
                    m_jit.move(
                        AssemblyHelpers::TrustedImmPtr(
                            bitwise_cast<void*>(operationCreateInlinedArguments)),
                        GPRInfo::nonArgGPR0);
                } else {
                    m_jit.setupArgumentsExecState();
                    m_jit.move(
                        AssemblyHelpers::TrustedImmPtr(
                            bitwise_cast<void*>(operationCreateArguments)),
                        GPRInfo::nonArgGPR0);
                }
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store32(
                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                    AssemblyHelpers::tagFor(argumentsRegister));
                m_jit.store32(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::payloadFor(argumentsRegister));
                m_jit.store32(
                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                    AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.store32(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            } else
                m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);

            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
        }
    }

    // 15) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
        m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
        m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
    }

    // 16) Adjust the call frame pointer.

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
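    // If the exit site was inside an inlined function, the baseline code we are about to jump to
    // expects callFrameRegister to point at that inlined frame, hence the stackOffset adjustment above.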

    // 17) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT2 != GPRInfo::cachedResultRegister && GPRInfo::regT2 != GPRInfo::cachedResultRegister2);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    m_jit.jump(GPRInfo::regT2);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF(" -> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)