/*
- * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
{
m_jit.jitAssertTagsInPlace();
- // 1) Pro-forma stuff.
+ // Pro-forma stuff.
if (Options::printEachOSRExit()) {
SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
debugInfo->codeBlock = m_jit.codeBlock();
m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
}
- // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit.
- m_jit.addPtr(
- CCallHelpers::TrustedImm32(
- -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
- CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
-
- // 2) Perform speculation recovery. This only comes into play when an operation
- // starts mutating state before verifying the speculation it has already made.
+ // Perform speculation recovery. This only comes into play when an operation
+ // starts mutating state before verifying the speculation it has already made.
if (recovery) {
switch (recovery->type()) {
}
}
- // 3) Refine some array and/or value profile, if appropriate.
+ // Refine some array and/or value profile, if appropriate.
if (!!exit.m_jsValueSource) {
if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
-#if CPU(ARM64)
- m_jit.pushToSave(scratch1);
- m_jit.pushToSave(scratch2);
-#else
- m_jit.push(scratch1);
- m_jit.push(scratch2);
-#endif
+ if (isARM64()) {
+ m_jit.pushToSave(scratch1);
+ m_jit.pushToSave(scratch2);
+ } else {
+ m_jit.push(scratch1);
+ m_jit.push(scratch2);
+ }
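+ // (A note on the split above: as we understand it, ARM64 requires the stack
+ // pointer to stay 16-byte aligned, so the plain single-register push()/pop()
+ // used on other targets isn't suitable there; pushToSave()/popToRestore()
+ // preserve the required alignment.)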
GPRReg value;
if (exit.m_jsValueSource.isAddress()) {
m_jit.lshift32(scratch1, scratch2);
m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
-#if CPU(ARM64)
- m_jit.popToRestore(scratch2);
- m_jit.popToRestore(scratch1);
-#else
- m_jit.pop(scratch2);
- m_jit.pop(scratch1);
-#endif
+ if (isARM64()) {
+ m_jit.popToRestore(scratch2);
+ m_jit.popToRestore(scratch1);
+ } else {
+ m_jit.pop(scratch2);
+ m_jit.pop(scratch1);
+ }
}
}
// variable" from "how was it represented", which will make it more difficult to add
// features in the future and it will make it harder to reason about bugs.
- // 4) Save all state from GPRs into the scratch buffer.
+ // Save all state from GPRs into the scratch buffer.
ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
// And voila, all GPRs are free to reuse.
- // 5) Save all state from FPRs into the scratch buffer.
+ // Save all state from FPRs into the scratch buffer.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
// Now, all FPRs are also free.
- // 6) Save all state from the stack into the scratch buffer. For simplicity we
- // do this even for state that's already in the right place on the stack.
- // It makes things simpler later.
+ // Save all state from the stack into the scratch buffer. For simplicity we
+ // do this even for state that's already in the right place on the stack.
+ // It makes things simpler later.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
}
}
- // 7) Do all data format conversions and store the results into the stack.
+ // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
+ // could toast some stack that the DFG used. We need to do it before storing to stack offsets
+ // used by baseline.
+ m_jit.addPtr(
+ CCallHelpers::TrustedImm32(
+ -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
+ CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
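+ // (For example, with requiredRegisterCountForExit == 64 and sizeof(Register) == 8,
+ // this would place the stack pointer 512 bytes below the frame pointer; the
+ // register count here is illustrative only.)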
- bool haveArguments = false;
+ // Do all data format conversions and store the results into the stack.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
AssemblyHelpers::addressFor(operand));
break;
- case ArgumentsThatWereNotCreated:
- haveArguments = true;
- // We can't restore this yet but we can make sure that the stack appears
- // sane.
- m_jit.store64(
- AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue())),
- AssemblyHelpers::addressFor(operand));
+ case DirectArgumentsThatWereNotCreated:
+ case ClonedArgumentsThatWereNotCreated:
+ // Don't do this yet; these cases are handled by emitRestoreArguments() below,
+ // once the stack is in a sensible state.
break;
default:
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
}
- // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
- // that all new calls into this code will go to the new JIT, so the execute
- // counter only affects call frames that performed OSR exit and call frames
- // that were still executing the old JIT at the time of another call frame's
- // OSR exit. We want to ensure that the following is true:
+ // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
+ // recoveries don't recursively refer to each other. But we don't assume that they only refer to
+ // certain ranges of locals, which is why we need to do this here, once the stack is sensible.
+ // Note that we also loosely allow for an arguments object to be materialized outside of its
+ // inline call frame's scope - though for now the DFG doesn't do that.
+
+ emitRestoreArguments(operands);
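+ // (For the reader: a rough, assumed sketch of what emitRestoreArguments() does -
+ // the real logic lives in its definition:
+ //
+ //     for (size_t index = 0; index < operands.size(); ++index) {
+ //         const ValueRecovery& recovery = operands[index];
+ //         if (recovery.technique() != DirectArgumentsThatWereNotCreated
+ //             && recovery.technique() != ClonedArgumentsThatWereNotCreated)
+ //             continue;
+ //         // Materialize the arguments object for the inline call frame at this
+ //         // operand and store it back into the operand's stack slot.
+ //     }
+ // )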
+
+ // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
//
- // (a) Code the performs an OSR exit gets a chance to reenter optimized
- // code eventually, since optimized code is faster. But we don't
- // want to do such reentery too aggressively (see (c) below).
+ // (a) Code that performs an OSR exit gets a chance to reenter optimized
+ // code eventually, since optimized code is faster. But we don't
+ // want to do such reentry too aggressively (see (c) below).
//
- // (b) If there is code on the call stack that is still running the old
- // JIT's code and has never OSR'd, then it should get a chance to
- // perform OSR entry despite the fact that we've exited.
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
//
- // (c) Code the performs an OSR exit should not immediately retry OSR
- // entry, since both forms of OSR are expensive. OSR entry is
- // particularly expensive.
+ // (c) Code that performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
//
- // (d) Frequent OSR failures, even those that do not result in the code
- // running in a hot loop, result in recompilation getting triggered.
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
//
- // To ensure (c), we'd like to set the execute counter to
- // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
- // (a) and (b), since then every OSR exit would delay the opportunity for
- // every call frame to perform OSR entry. Essentially, if OSR exit happens
- // frequently and the function has few loops, then the counter will never
- // become non-negative and OSR entry will never be triggered. OSR entry
- // will only happen if a loop gets hot in the old JIT, which does a pretty
- // good job of ensuring (a) and (b). But that doesn't take care of (d),
- // since each speculation failure would reset the execute counter.
- // So we check here if the number of speculation failures is significantly
- // larger than the number of successes (we want 90% success rate), and if
- // there have been a large enough number of failures. If so, we set the
- // counter to 0; otherwise we set the counter to
- // counterValueForOptimizeAfterWarmUp().
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
handleExitCounts(m_jit, exit);
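+ // (Illustrative only: handleExitCounts() is assumed to implement the policy
+ // described above along roughly these lines; the names below are made up
+ // except for counterValueForOptimizeAfterWarmUp():
+ //
+ //     unsigned failures = codeBlock->osrExitCount();  // hypothetical accessor
+ //     if (failures >= largeEnoughFailureCount && failures * 10 > executionCount)
+ //         executeCounter = 0;                         // retry optimization soon
+ //     else
+ //         executeCounter = counterValueForOptimizeAfterWarmUp();
+ // )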
- // 9) Reify inlined call frames.
+ // Reify inlined call frames.
reifyInlinedCallFrames(m_jit, exit);
- // 10) Create arguments if necessary and place them into the appropriate aliased
- // registers.
-
- if (haveArguments) {
- ArgumentsRecoveryGenerator argumentsRecovery;
-
- for (size_t index = 0; index < operands.size(); ++index) {
- const ValueRecovery& recovery = operands[index];
- if (recovery.technique() != ArgumentsThatWereNotCreated)
- continue;
- argumentsRecovery.generateFor(
- operands.operandForIndex(index), exit.m_codeOrigin, m_jit);
- }
- }
-
- // 12) And finish.
+ // And finish.
adjustAndJumpToTarget(m_jit, exit);
}