/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"
#include "Operations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
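    // The plan: recover every live value into the stack slot the baseline JIT
    // expects it in, reify any inlined call frames, update the exit counters,
    // and finally jump to the equivalent point in the baseline machine code.
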
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit for (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(")  ");
    dumpOperands(operands, WTF::dataFile());
#endif

    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    GPRReg alreadyBoxed = InvalidGPRReg;

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;

        case BooleanSpeculationCheck:
            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }

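    // (For SpeculativeAdd, the add has already executed by the time we exit, so
    // the sub32 above restores the operand's original value; the or64 then
    // re-tags it as a boxed int32.)
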
    // 3) Refine some array and/or value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.gpr();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }

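        // The spills around the profile update above are needed because register
        // state at an OSR exit is arbitrary: no GPR can be assumed to be free.
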
        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

            if (exit.m_jsValueSource.isAddress()) {
                // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
                // since we know how to restore it.
                m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
                m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
                m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
            } else
                m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
        }
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveUnboxedDoubles = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveUInt32s = false;
    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case DisplacedInJSStack:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
                switch (operands.local(recovery.virtualRegister()).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UnboxedInt32InGPR:
        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedDouble:
            haveUnboxedDoubles = true;
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;

        default:
            break;
        }
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("  ");
    if (numberOfPoisonedVirtualRegisters)
        dataLogF("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
    if (numberOfDisplacedVirtualRegisters)
        dataLogF("Displaced=%u ", numberOfDisplacedVirtualRegisters);
    if (haveUnboxedInt32s)
        dataLogF("UnboxedInt32 ");
    if (haveUnboxedDoubles)
        dataLogF("UnboxedDoubles ");
    if (haveUInt32s)
        dataLogF("UInt32 ");
    if (haveFPRs)
        dataLogF("FPR ");
    if (haveConstants)
        dataLogF("Constants ");
    if (haveUndefined)
        dataLogF("Undefined ");
    dataLogF(" ");
#endif

    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers.
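    // (On JSVALUE64, an int32 is boxed by OR'ing in TagTypeNumber - that is what
    // the or64 with tagTypeNumberRegister does - and a boxed double is the raw
    // double bits minus TagTypeNumber, which is what boxDouble() and the sub64s
    // in the later steps rely on.)
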
    if (haveUnboxedInt32s || haveUInt32s) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case UnboxedInt32InGPR:
                if (recovery.gpr() != alreadyBoxed)
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                break;

            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case UInt32InGPR: {
                // This occurs when the speculative JIT left an unsigned 32-bit integer
                // in a GPR. If it's positive, we can just box the int. Otherwise we
                // need to turn it into a boxed double.

                // We don't try to be clever with register allocation here; we assume
                // that the program is using FPRs and we don't try to figure out which
                // ones it is using. Instead just temporarily save fpRegT0 and then
                // restore it. This makes sense because this path is not cheap to begin
                // with, and should happen very rarely.

                GPRReg addressGPR = GPRInfo::regT0;
                if (addressGPR == recovery.gpr())
                    addressGPR = GPRInfo::regT1;

                m_jit.store64(addressGPR, scratchDataBuffer);
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

                AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

                m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
                m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
                m_jit.boxDouble(FPRInfo::fpRegT0, recovery.gpr());

                AssemblyHelpers::Jump done = m_jit.jump();

                positive.link(&m_jit);

                m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());

                done.link(&m_jit);

                m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
                m_jit.load64(scratchDataBuffer, addressGPR);
                break;
            }

            default:
                break;
            }
        }
    }

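    // (The UInt32InGPR path is also why the scratch buffer was sized to at least
    // two EncodedJSValues when haveUInt32s is set: slot 0 holds the saved
    // addressGPR and slot 1 holds the saved fpRegT0.)
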
    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
            break;
        default:
            break;
        }
    }

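    // Values whose destination slots are poisoned were parked in the scratch
    // buffer above; they are flushed to the stack in step 11, once the displaced
    // virtual registers have been shuffled out of the way.
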
    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            m_jit.boxDouble(fpr, gpr);
        }

        // 8) Dump all doubles into the stack, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(gpr, scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // At this point all GPRs and FPRs are available for scratch use.

    // 9) Box all unboxed doubles in the stack.
    if (haveUnboxedDoubles) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != AlreadyInJSStackAsUnboxedDouble)
                continue;
            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

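    // (The assert checks that every value parked in the scratch buffer during
    // steps 6 and 8 was accounted for by the count computed in step 4.)
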
    // 10) Reshuffle displaced virtual registers. Optimize for the case that
    //     the number of displaced virtual registers is not more than the number
    //     of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;

                case Int32DisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.store64(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];

                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;

                case Int32DisplacedInJSStack: {
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.load64(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

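    // Either way, every displaced value now sits in its baseline stack slot; the
    // two strategies differ only in whether registers or the scratch buffer served
    // as the staging area.
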
    // 11) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = operands.local(virtualRegister);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UInt32InGPR:
            case InFPR:
                m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

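    // (poisonIndex() returns the scratch slot that was recorded in
    // m_poisonScratchIndices when the value was parked in steps 6 and 8.)
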
    // 12) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined)
            m_jit.move(AssemblyHelpers::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
            else
                m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want a 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

    handleExitCounts(exit);

    // 14) Reify inlined call frames.

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        m_jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }

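    // Each inlined frame occupies a fixed range of the machine call frame, so its
    // header slots are addressed as inlineCallFrame->stackOffset plus the usual
    // JSStack header offsets.
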
    // 15) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
                    m_jit.setupArguments(GPRInfo::regT0);
                } else
                    m_jit.setupArgumentsExecState();
                m_jit.move(
                    AssemblyHelpers::TrustedImmPtr(
                        bitwise_cast<void*>(operationCreateArguments)),
                    GPRInfo::nonArgGPR0);
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
                m_jit.store64(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
        }
    }

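    // (didCreateArgumentsObject ensures operationCreateArguments runs at most once
    // per call frame, even when several operands alias the same arguments object.)
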
    // 16) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        m_jit.load64(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);

    // 17) Adjust the call frame pointer.

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

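    // This adjustment makes callFrameRegister point at the reified frame of the
    // inlined function we exited in, which is the frame its baseline code expects.
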
    // 18) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);
    m_jit.jump(GPRInfo::regT1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("-> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)