/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLog("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLog(") ");
    exit.dump(WTF::dataFile());
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_jit.codeBlock();
    debugInfo->nodeIndex = exit.m_nodeIndex;

    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    GPRReg alreadyBoxed = InvalidGPRReg;

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;

        case BooleanSpeculationCheck:
            m_jit.xorPtr(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }
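
    // Note on the SpeculativeAdd case above: the speculative add has already executed
    // "dest += src" by the time the speculation check fires, so subtracting src undoes
    // that mutation, and OR-ing in the tag reboxes the restored value as an int32 JSValue.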

    // 3) Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
        EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
        dataLog(" (have exit profile, bucket %p) ", bucket);
#endif

        if (exit.m_jsValueSource.isAddress()) {
            // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
            // since we know how to restore it.
            m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
            m_jit.storePtr(GPRInfo::tagTypeNumberRegister, bucket);
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(TagTypeNumber)), GPRInfo::tagTypeNumberRegister);
        } else
            m_jit.storePtr(exit.m_jsValueSource.gpr(), bucket);
    }
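
    // tagTypeNumberRegister holds the constant TagTypeNumber throughout JIT code, which is
    // why it is safe to borrow it above: once the profiled value has been stored into the
    // bucket, the register is restored simply by reloading that constant.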

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveUnboxedDoubles = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveUInt32s = false;

    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        switch (recovery.technique()) {
        case Int32DisplacedInRegisterFile:
        case DoubleDisplacedInRegisterFile:
        case DisplacedInRegisterFile:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
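            // Example: suppose operand 3 must be recovered from register file slot r5
            // (DisplacedInRegisterFile), while the variable whose home is slot r5 has its
            // own value sitting in a GPR. Writing that GPR into slot r5 before the shuffle
            // would destroy operand 3's source, so slot r5 is marked poisoned and the GPR
            // is parked in scratch storage until the displaced values have been moved.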
            if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
                switch (exit.m_variables[recovery.virtualRegister()].technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UnboxedInt32InGPR:
        case AlreadyInRegisterFileAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;

        case AlreadyInRegisterFileAsUnboxedDouble:
            haveUnboxedDoubles = true;
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        default:
            break;
        }
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    if (numberOfPoisonedVirtualRegisters)
        dataLog("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
    if (numberOfDisplacedVirtualRegisters)
        dataLog("Displaced=%u ", numberOfDisplacedVirtualRegisters);
    if (haveUnboxedInt32s)
        dataLog("UnboxedInt32 ");
    if (haveUnboxedDoubles)
        dataLog("UnboxedDoubles ");
    if (haveUInt32s)
        dataLog("UInt32s ");
    if (haveFPRs)
        dataLog("FPRs ");
    if (haveConstants)
        dataLog("Constants ");
    if (haveUndefined)
        dataLog("Undefined ");
#endif

    ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
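
    // The scratch buffer needs one EncodedJSValue slot per poisoned register value, plus
    // one per displaced virtual register if they do not all fit in GPRs, and at least two
    // slots whenever there is a UInt32InGPR recovery (that path below spills a GPR into
    // slot 0 and fpRegT0 into slot 1 while it converts the value).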

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers.

    if (haveUnboxedInt32s || haveUInt32s) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            switch (recovery.technique()) {
            case UnboxedInt32InGPR:
                if (recovery.gpr() != alreadyBoxed)
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                break;

            case AlreadyInRegisterFileAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;

            case UInt32InGPR: {
                // This occurs when the speculative JIT left an unsigned 32-bit integer
                // in a GPR. If it's positive, we can just box the int. Otherwise we
                // need to turn it into a boxed double.

                // We don't try to be clever with register allocation here; we assume
                // that the program is using FPRs and we don't try to figure out which
                // ones it is using. Instead just temporarily save fpRegT0 and then
                // restore it. This makes sense because this path is not cheap to begin
                // with, and should happen very rarely.
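                // Worked example: a GPR holding the unsigned value 0x80000000 reads as
                // -2147483648 when treated as an int32, so the negative branch below
                // converts it to a double and adds 2^32 (4294967296), giving 2147483648.0,
                // which is then boxed as a double JSValue.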

                GPRReg addressGPR = GPRInfo::regT0;
                if (addressGPR == recovery.gpr())
                    addressGPR = GPRInfo::regT1;

                m_jit.storePtr(addressGPR, scratchDataBuffer);
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

                AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

                m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
                m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
                m_jit.boxDouble(FPRInfo::fpRegT0, recovery.gpr());

                AssemblyHelpers::Jump done = m_jit.jump();

                positive.link(&m_jit);

                m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());

                done.link(&m_jit);

                m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
                m_jit.loadPtr(scratchDataBuffer, addressGPR);
                break;
            }

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(exit.m_variables.size());
    unsigned currentPoisonIndex = 0;

    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        int operand = exit.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.storePtr(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storePtr(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
            break;

        default:
            break;
        }
    }

    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            m_jit.boxDouble(fpr, gpr);
        }

        // 8) Dump all doubles into the register file, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.storePtr(gpr, scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    // At this point all GPRs and FPRs are available for scratch use.

    // 9) Box all unboxed doubles in the register file.
    if (haveUnboxedDoubles) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != AlreadyInRegisterFileAsUnboxedDouble)
                continue;
            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)), FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 10) Reshuffle displaced virtual registers. Optimize for the case that
    //     the number of displaced virtual registers is not more than the number
    //     of available physical registers.
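
    // In the common case below (everything fits in GPRs), all displaced values are first
    // loaded into registers and only then written to their destinations, so no store can
    // clobber a source that has not yet been read. Boxing conventions on JSVALUE64: an
    // int32 is boxed by OR-ing in TagTypeNumber, and a raw double is boxed by subtracting
    // TagTypeNumber, which modulo 2^64 is the same as adding the double-encode offset 2^48.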

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;

                case Int32DisplacedInRegisterFile: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                case DoubleDisplacedInRegisterFile: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                case Int32DisplacedInRegisterFile:
                case DoubleDisplacedInRegisterFile:
                    m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);

                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;

                case Int32DisplacedInRegisterFile: {
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                case DoubleDisplacedInRegisterFile: {
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                case Int32DisplacedInRegisterFile:
                case DoubleDisplacedInRegisterFile:
                    m_jit.loadPtr(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 11) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = exit.m_variables[virtualRegister];
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UInt32InGPR:
            case InFPR:
                m_jit.loadPtr(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 12) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined)
            m_jit.move(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0);

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
            else
                m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    // (a) Code that performs an OSR exit gets a chance to reenter optimized
    //     code eventually, since optimized code is faster. But we don't
    //     want to do such reentry too aggressively (see (c) below).
    //
    // (b) If there is code on the call stack that is still running the old
    //     JIT's code and has never OSR'd, then it should get a chance to
    //     perform OSR entry despite the fact that we've exited.
    //
    // (c) Code that performs an OSR exit should not immediately retry OSR
    //     entry, since both forms of OSR are expensive. OSR entry is
    //     particularly expensive.
    //
    // (d) Frequent OSR failures, even those that do not result in the code
    //     running in a hot loop, result in recompilation getting triggered.
    //
    // To ensure (c), we'd like to set the execute counter to
    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    // (a) and (b), since then every OSR exit would delay the opportunity for
    // every call frame to perform OSR entry. Essentially, if OSR exit happens
    // frequently and the function has few loops, then the counter will never
    // become non-negative and OSR entry will never be triggered. OSR entry
    // will only happen if a loop gets hot in the old JIT, which does a pretty
    // good job of ensuring (a) and (b). But that doesn't take care of (d),
    // since each speculation failure would reset the execute counter.
    // So we check here if the number of speculation failures is significantly
    // larger than the number of successes (we want 90% success rate), and if
    // there have been a large enough number of failures. If so, we set the
    // counter to 0; otherwise we set the counter to
    // counterValueForOptimizeAfterWarmUp().
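    //
    // handleExitCounts() below emits the counter bookkeeping that implements this policy;
    // the helper itself is defined outside this file.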

    handleExitCounts(exit);

    // 14) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);

    // 15) Fix call frame(s).

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));

    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
    }
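
    // Each loop iteration above materializes the call frame header that the baseline JIT
    // expects for one inlined frame: CodeBlock, ScopeChain, CallerFrame, ReturnPC,
    // ArgumentCount, and Callee slots at the frame's stackOffset, with the return PC
    // pointing at the caller's baseline code just after the call.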

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 16) Jump into the corresponding baseline JIT code.
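
    // The decoded code map translates bytecode indices in the baseline CodeBlock into
    // machine code offsets, so the binary search below yields the baseline machine code
    // address for the exit's bytecode index; that is where execution resumes.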

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);
    m_jit.jump(GPRInfo::regT1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("-> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)