/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLog("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLog(") ");
    exit.dump(WTF::dataFile());
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_jit.codeBlock();
    debugInfo->nodeIndex = exit.m_nodeIndex;

    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    GPRReg alreadyBoxed = InvalidGPRReg;

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
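            // The speculative add already mutated the destination before the
            // overflow check failed, so undo the add and rebox the result as an
            // int32 (JSVALUE64 boxes an int32 by OR'ing in TagTypeNumber).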
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;

        case BooleanSpeculationCheck:
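            // The speculative JIT unboxes a boolean to 0/1 by XOR'ing with
            // ValueFalse; XOR'ing with ValueFalse again restores the boxed value.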
            m_jit.xorPtr(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
        EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
        dataLog(" (have exit profile, bucket %p) ", bucket);
#endif

        if (exit.m_jsValueSource.isAddress()) {
            // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
            // since we know how to restore it.
            m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
            m_jit.storePtr(GPRInfo::tagTypeNumberRegister, bucket);
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(TagTypeNumber)), GPRInfo::tagTypeNumberRegister);
        } else
            m_jit.storePtr(exit.m_jsValueSource.gpr(), bucket);
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveUnboxedDoubles = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveUInt32s = false;

    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        switch (recovery.technique()) {
        case Int32DisplacedInRegisterFile:
        case DoubleDisplacedInRegisterFile:
        case DisplacedInRegisterFile:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
                switch (exit.m_variables[recovery.virtualRegister()].technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UnboxedInt32InGPR:
        case AlreadyInRegisterFileAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;

        case AlreadyInRegisterFileAsUnboxedDouble:
            haveUnboxedDoubles = true;
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        default:
            break;
        }
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog(" ");
    if (numberOfPoisonedVirtualRegisters)
        dataLog("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
    if (numberOfDisplacedVirtualRegisters)
        dataLog("Displaced=%u ", numberOfDisplacedVirtualRegisters);
    if (haveUnboxedInt32s)
        dataLog("UnboxedInt32 ");
    if (haveUnboxedDoubles)
        dataLog("UnboxedDoubles ");
    if (haveUInt32s)
        dataLog("UInt32 ");
    if (haveFPRs)
        dataLog("FPR ");
    if (haveConstants)
        dataLog("Constants ");
    if (haveUndefined)
        dataLog("Undefined ");
    dataLog(" ");
#endif

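    // Reserve at least two scratch slots if UInt32s need rescuing (one to spill a
    // GPR and one to spill fpRegT0, below); otherwise reserve one slot per poisoned
    // virtual register, plus one per displaced virtual register if they do not all
    // fit in GPRs.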
    ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers.

    if (haveUnboxedInt32s || haveUInt32s) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            switch (recovery.technique()) {
            case UnboxedInt32InGPR:
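                // Box the unboxed int32 by OR'ing in TagTypeNumber, unless the
                // speculation recovery above already boxed this register.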
                if (recovery.gpr() != alreadyBoxed)
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                break;

            case AlreadyInRegisterFileAsUnboxedInt32:
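                // The 32-bit payload is already in the register file slot; writing
                // the high word of TagTypeNumber into the tag half of the slot
                // turns it into a boxed int32.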
                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;

            case UInt32InGPR: {
                // This occurs when the speculative JIT left an unsigned 32-bit integer
                // in a GPR. If it's positive, we can just box the int. Otherwise we
                // need to turn it into a boxed double.

                // We don't try to be clever with register allocation here; we assume
                // that the program is using FPRs and we don't try to figure out which
                // ones it is using. Instead just temporarily save fpRegT0 and then
                // restore it. This makes sense because this path is not cheap to begin
                // with, and should happen very rarely.

                GPRReg addressGPR = GPRInfo::regT0;
                if (addressGPR == recovery.gpr())
                    addressGPR = GPRInfo::regT1;

                m_jit.storePtr(addressGPR, scratchDataBuffer);
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

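                // Values with the top bit set do not fit in an int32. Convert the
                // GPR as a signed int32 and add 2^32 to recover the unsigned value,
                // then box the result as a double.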
                AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

                m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
                m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
                m_jit.boxDouble(FPRInfo::fpRegT0, recovery.gpr());

                AssemblyHelpers::Jump done = m_jit.jump();

                positive.link(&m_jit);

                m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());

                done.link(&m_jit);

                m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
                m_jit.loadPtr(scratchDataBuffer, addressGPR);
                break;
            }

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(exit.m_variables.size());
    unsigned currentPoisonIndex = 0;

    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        int operand = exit.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.storePtr(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storePtr(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
            break;
        default:
            break;
        }
    }

    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)

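        // Each FPR is paired with the GPR at the same index; those GPRs are free
        // to clobber now that their contents have been flushed above.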
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            m_jit.boxDouble(fpr, gpr);
        }

        // 8) Dump all doubles into the register file, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.storePtr(gpr, scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    // At this point all GPRs and FPRs are available for scratch use.

    // 9) Box all unboxed doubles in the register file.
    if (haveUnboxedDoubles) {
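        // The DFG stored raw double bits into these register file slots, so
        // reload each one, box it, and store the boxed JSValue back.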
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != AlreadyInRegisterFileAsUnboxedDouble)
                continue;
            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)), FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 10) Reshuffle displaced virtual registers. Optimize for the case that
    //     the number of displaced virtual registers is not more than the number
    //     of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;

                case Int32DisplacedInRegisterFile: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

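                // Boxing a raw double is done by subtracting TagTypeNumber, which
                // is equivalent to adding 2^48, the JSVALUE64 double encoding offset.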
                case DoubleDisplacedInRegisterFile: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                case Int32DisplacedInRegisterFile:
                case DoubleDisplacedInRegisterFile:
                    m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);

                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;

                case Int32DisplacedInRegisterFile: {
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                case DoubleDisplacedInRegisterFile: {
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                case Int32DisplacedInRegisterFile:
                case DoubleDisplacedInRegisterFile:
                    m_jit.loadPtr(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 11) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
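        // All displaced values are now in place, so it is finally safe to move
        // each poisoned value from its scratch slot into its register file slot.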
        for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = exit.m_variables[virtualRegister];
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UInt32InGPR:
            case InFPR:
                m_jit.loadPtr(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 12) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined)
            m_jit.move(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0);

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
            else
                m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

    handleExitCounts(exit);

    // 14) Load the result of the last bytecode operation into regT0.

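    // The baseline JIT expects the result of the most recent operation to be
    // cached in regT0 (cachedResultRegister); a last-set operand equal to
    // std::numeric_limits<int>::max() means there is no such result to restore.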
    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);

    // 15) Fix call frame(s).

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));

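    // The DFG does not materialize call frames for inlined calls, so walk the
    // inline stack from the exit's code origin outward and reconstruct each
    // frame's header slots (CodeBlock, ScopeChain, CallerFrame, ReturnPC,
    // ArgumentCount, Callee) as the baseline JIT expects them.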
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

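        // If the caller is itself inlined, its frame lives at a fixed offset from
        // the machine call frame; otherwise the machine call frame is the caller's
        // frame.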
        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
    }

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 16) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);

    m_jit.jump(GPRInfo::regT1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("-> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)