/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"
#include "Operations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit for (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(") ");
    dumpOperands(operands, WTF::dataFile());
#endif

    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
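    //
    //    An illustrative sketch (not part of the emitted code): for a SpeculativeAdd
    //    that already computed "dest += src" before its overflow check failed, the
    //    exit undoes the add and reboxes the operand, roughly
    //
    //        dest = (dest - src) | TagTypeNumber;
    //
    //    which is what the sub32/or64 pair below emits, assuming the usual JSVALUE64
    //    encoding in which TagTypeNumber is 0xffff000000000000.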

    GPRReg alreadyBoxed = InvalidGPRReg;

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;

        case BooleanSpeculationCheck:
            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }

    // 3) Refine some array and/or value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or that weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.gpr();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
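                // The profile's arrayModes field acts as a bit set indexed by
                // indexing type: the three instructions above compute
                // (1 << structure->indexingType()) and OR it into the profile.
                // As a purely illustrative example, a structure whose indexing
                // type happened to be 5 would set bit 5, i.e. arrayModes |= 0x20.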

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

            if (exit.m_jsValueSource.isAddress()) {
                // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
                // since we know how to restore it.
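                // (tagTypeNumberRegister is pinned to the constant TagTypeNumber
                // throughout DFG-generated code, so it is safe to clobber here and
                // then simply re-materialize that constant, as the move below does.)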
                m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
                m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
                m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
            } else
                m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
        }
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveUnboxedDoubles = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveUInt32s = false;
    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case DisplacedInJSStack:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
                switch (operands.local(recovery.virtualRegister()).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UnboxedInt32InGPR:
        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedDouble:
            haveUnboxedDoubles = true;
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;

        default:
            break;
        }
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF(" ");
    if (numberOfPoisonedVirtualRegisters)
        dataLogF("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
    if (numberOfDisplacedVirtualRegisters)
        dataLogF("Displaced=%u ", numberOfDisplacedVirtualRegisters);
    if (haveUnboxedInt32s)
        dataLogF("UnboxedInt32 ");
    if (haveUnboxedDoubles)
        dataLogF("UnboxedDoubles ");
    if (haveUInt32s)
        dataLogF("UInt32 ");
    if (haveFPRs)
        dataLogF("FPR ");
    if (haveConstants)
        dataLogF("Constants ");
    if (haveUndefined)
        dataLogF("Undefined ");
    dataLogF(" ");
#endif

    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
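    // A rough sizing example with hypothetical numbers (not taken from this code):
    // with 3 poisoned locals and 20 displaced virtual registers on a target where
    // GPRInfo::numberOfRegisters is 16, the displaced values cannot all be held in
    // registers at once, so the buffer needs max(0, 3 + 20) = 23 EncodedJSValue
    // slots. The "2u" term covers the UInt32InGPR path below, which uses two slots
    // of the buffer as temporary spill space.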

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers.

    if (haveUnboxedInt32s || haveUInt32s) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case UnboxedInt32InGPR:
                if (recovery.gpr() != alreadyBoxed)
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                break;

            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case UInt32InGPR: {
                // This occurs when the speculative JIT left an unsigned 32-bit integer
                // in a GPR. If it's positive, we can just box the int. Otherwise we
                // need to turn it into a boxed double.

                // We don't try to be clever with register allocation here; we assume
                // that the program is using FPRs and we don't try to figure out which
                // ones it is using. Instead just temporarily save fpRegT0 and then
                // restore it. This makes sense because this path is not cheap to begin
                // with, and should happen very rarely.
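
                // Illustrative example (not from the source): the unsigned value
                // 0x80000000 (2147483648) reads as the negative int32 -2147483648,
                // so the slow path below converts it to a double and adds 2^32,
                // yielding 2147483648.0, which is then boxed as a double. A small
                // value like 42 takes the "positive" fast path instead and is boxed
                // by ORing in TagTypeNumber.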

                GPRReg addressGPR = GPRInfo::regT0;
                if (addressGPR == recovery.gpr())
                    addressGPR = GPRInfo::regT1;

                m_jit.store64(addressGPR, scratchDataBuffer);
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

                AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

                m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
                m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
                m_jit.boxDouble(FPRInfo::fpRegT0, recovery.gpr());

                AssemblyHelpers::Jump done = m_jit.jump();

                positive.link(&m_jit);

                m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());

                done.link(&m_jit);

                m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
                m_jit.load64(scratchDataBuffer, addressGPR);
                break;
            }

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.
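    //
    //    The poison bookkeeping, in outline: a poisoned local's register value is
    //    parked in scratchDataBuffer[currentPoisonIndex], and that index is recorded
    //    in m_poisonScratchIndices so that step 11 can copy the value into the
    //    local's stack slot once the displaced-register shuffle in step 10 can no
    //    longer clobber it.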

    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
            break;
        default:
            break;
        }
    }

    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)
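        //
        //    Each double's FPR index is mapped to the GPR with the same index,
        //    GPRInfo::toRegister(FPRInfo::toIndex(fpr)). Since all GPRs are free for
        //    scratch use at this point and there are at least as many GPRs as FPRs,
        //    every boxed double gets a distinct GPR to wait in until step 8 stores it.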

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            m_jit.boxDouble(fpr, gpr);
        }

        // 8) Dump all doubles into the stack, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(gpr, scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // At this point all GPRs and FPRs are available for scratch use.

    // 9) Box all unboxed doubles in the stack.
    if (haveUnboxedDoubles) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != AlreadyInJSStackAsUnboxedDouble)
                continue;
            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 10) Reshuffle displaced virtual registers. Optimize for the case that
    //     the number of displaced virtual registers is not more than the number
    //     of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.
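            //
            // The shuffle below is done in two passes: first load every displaced
            // value into its own GPR, then store them all to their destinations.
            // Because no destination slot is written until every source slot has
            // been read, a displaced set such as { r1 -> r2, r2 -> r1 } cannot
            // clobber itself.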

            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;

                case Int32DisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
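                    // The sub64 above is the double-boxing step: subtracting
                    // TagTypeNumber (0xffff000000000000 under the usual JSVALUE64
                    // encoding) is, modulo 2^64, the same as adding 2^48, which
                    // shifts the raw double bits into the boxed-double range.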
                    break;
                }

                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.store64(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super-efficient. In particular, it correctly handles cases where, for
            // example, the displacements are a permutation of the destination values,
            // like
            //
            //     1 -> 2
            //     2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];

                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;

                case Int32DisplacedInJSStack: {
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.load64(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 11) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = operands.local(virtualRegister);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UInt32InGPR:
            case InFPR:
                m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 12) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined)
            m_jit.move(AssemblyHelpers::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
            else
                m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want a 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

    handleExitCounts(exit);

    // 14) Reify inlined call frames.

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
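        // The reconstructed return PC must point at the caller's baseline machine
        // code for the bytecode instruction immediately after the inlined call,
        // hence caller.bytecodeIndex + OPCODE_LENGTH(op_call); the binary search
        // below maps that bytecode index to its machine code offset.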
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        m_jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }

    // 15) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
                    m_jit.setupArguments(GPRInfo::regT0);
                } else
                    m_jit.setupArgumentsExecState();
                m_jit.move(
                    AssemblyHelpers::TrustedImmPtr(
                        bitwise_cast<void*>(operationCreateArguments)),
                    GPRInfo::nonArgGPR0);
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
                m_jit.store64(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
        }
    }

    // 16) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        m_jit.load64(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
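    // (The baseline JIT is assumed to keep its most recently produced value cached
    // in GPRInfo::cachedResultRegister, which is why the exit rematerializes the
    // last set operand there before jumping back in.)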

    // 17) Adjust the call frame pointer.

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 18) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);

    m_jit.jump(GPRInfo::regT1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("-> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)