#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)
#include "DFGOperations.h"
+#include "Operations.h"
+#include <wtf/DataLog.h>
namespace JSC { namespace DFG {
-void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
+void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
// 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
+ dataLogF("OSR exit (");
for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
- dataLog("bc#%u", codeOrigin.bytecodeIndex);
+ dataLogF("bc#%u", codeOrigin.bytecodeIndex);
if (!codeOrigin.inlineCallFrame)
break;
- dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
+ dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
}
- dataLog(") at JIT offset 0x%x ", m_jit.debugOffset());
- exit.dump(WTF::dataFile());
+ dataLogF(") at JIT offset 0x%x ", m_jit.debugOffset());
+ dumpOperands(operands, WTF::dataFile());
#endif
-#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
- SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
- debugInfo->codeBlock = m_jit.codeBlock();
- debugInfo->nodeIndex = exit.m_nodeIndex;
- m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
-#endif
+ if (Options::printEachOSRExit()) {
+ SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
+ debugInfo->codeBlock = m_jit.codeBlock();
+
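+        // Note: debugInfo is heap-allocated and never freed here; the emitted
+        // debugCall embeds a pointer to it, so it must outlive the generated code.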
+ m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
+ }
#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
m_jit.breakpoint();
// 3) Refine some value profile, if appropriate.
- if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
- EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+ if (!!exit.m_jsValueSource) {
+ if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+            // If the instruction that this originated from has an array profile, then
+            // refine it. If it doesn't, then do nothing. The latter could happen for
+            // hoisted checks, or for checks emitted for operations that didn't have
+            // array profiling - either ops that aren't array accesses at all, or ops
+            // that weren't known to be array accesses in the bytecode. The second case
+            // is a FIXME, while the first is an outcome of a CheckStructure not knowing
+            // why it was emitted (it could be due either to an inline cache for a
+            // property access, or to an array profile).
+
+ // Note: We are free to assume that the jsValueSource is already known to
+ // be a cell since both BadCache and BadIndexingType exits occur after
+ // the cell check would have already happened.
+
+ CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+ if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
+ GPRReg usedRegister1;
+ GPRReg usedRegister2;
+ if (exit.m_jsValueSource.isAddress()) {
+ usedRegister1 = exit.m_jsValueSource.base();
+ usedRegister2 = InvalidGPRReg;
+ } else {
+ usedRegister1 = exit.m_jsValueSource.payloadGPR();
+ if (exit.m_jsValueSource.hasKnownTag())
+ usedRegister2 = InvalidGPRReg;
+ else
+ usedRegister2 = exit.m_jsValueSource.tagGPR();
+ }
+
+ GPRReg scratch1;
+ GPRReg scratch2;
+ scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
+ scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
+
+#if CPU(ARM64)
+ m_jit.pushToSave(scratch1);
+ m_jit.pushToSave(scratch2);
+#else
+ m_jit.push(scratch1);
+ m_jit.push(scratch2);
+#endif
+
+ GPRReg value;
+ if (exit.m_jsValueSource.isAddress()) {
+ value = scratch1;
+ m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
+ } else
+ value = exit.m_jsValueSource.payloadGPR();
+
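+                // Emit the profile update inline: record the structure we just saw,
+                // then set the bit for its indexing type in the profile's ArrayModes
+                // word, i.e. arrayModes |= 1 << indexingType.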
+ m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+ m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
+ m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
+ m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
+ m_jit.lshift32(scratch1, scratch2);
+ m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+
+#if CPU(ARM64)
+ m_jit.popToRestore(scratch2);
+ m_jit.popToRestore(scratch1);
+#else
+ m_jit.pop(scratch2);
+ m_jit.pop(scratch1);
+#endif
+ }
+ }
- if (exit.m_jsValueSource.isAddress()) {
- // Save a register so we can use it.
- GPRReg scratch = GPRInfo::regT0;
- if (scratch == exit.m_jsValueSource.base())
- scratch = GPRInfo::regT1;
- ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t));
- EncodedJSValue* scratchDataBuffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
- m_jit.store32(scratch, scratchDataBuffer);
- m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
- m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
- m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
- m_jit.load32(scratchDataBuffer, scratch);
- } else if (exit.m_jsValueSource.hasKnownTag()) {
- m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
- } else {
- m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ if (!!exit.m_valueProfile) {
+ EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+
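+            // Record the offending value's tag and payload in the bucket so the
+            // baseline JIT's value profiling sees the value that caused this exit.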
+ if (exit.m_jsValueSource.isAddress()) {
+ // Save a register so we can use it.
+ GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
+
+#if CPU(ARM64)
+ m_jit.pushToSave(scratch);
+#else
+ m_jit.push(scratch);
+#endif
+
+ m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+
+#if CPU(ARM64)
+ m_jit.popToRestore(scratch);
+#else
+ m_jit.pop(scratch);
+#endif
+ } else if (exit.m_jsValueSource.hasKnownTag()) {
+ m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ } else {
+ m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ }
}
}
// GPRInfo::numberOfRegisters of them. Also see if there are any constants,
// any undefined slots, any FPR slots, and any unboxed ints.
- Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
+ Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
poisonedVirtualRegisters[i] = false;
// Int32s, have no FPRs, and have no constants. If there are constants, we
// expect most of them to be jsUndefined(); if that's true then we handle that
// specially to minimize code size and execution time.
- bool haveUnboxedInt32InRegisterFile = false;
- bool haveUnboxedCellInRegisterFile = false;
- bool haveUnboxedBooleanInRegisterFile = false;
+ bool haveUnboxedInt32InJSStack = false;
+ bool haveUnboxedCellInJSStack = false;
+ bool haveUnboxedBooleanInJSStack = false;
bool haveUInt32s = false;
bool haveFPRs = false;
bool haveConstants = false;
bool haveUndefined = false;
+ bool haveArguments = false;
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
switch (recovery.technique()) {
- case DisplacedInRegisterFile:
- case Int32DisplacedInRegisterFile:
- case CellDisplacedInRegisterFile:
- case BooleanDisplacedInRegisterFile:
+ case DisplacedInJSStack:
+ case Int32DisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
numberOfDisplacedVirtualRegisters++;
ASSERT((int)recovery.virtualRegister() >= 0);
// to ensure this happens efficiently. Note that we expect this case
// to be rare, so the handling of it is optimized for the cases in
// which it does not happen.
- if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
- switch (exit.m_variables[recovery.virtualRegister()].technique()) {
+ if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
+ switch (operands.local(recovery.virtualRegister()).technique()) {
case InGPR:
case UnboxedInt32InGPR:
case UnboxedBooleanInGPR:
haveUInt32s = true;
break;
- case AlreadyInRegisterFileAsUnboxedInt32:
- haveUnboxedInt32InRegisterFile = true;
+ case AlreadyInJSStackAsUnboxedInt32:
+ haveUnboxedInt32InJSStack = true;
break;
- case AlreadyInRegisterFileAsUnboxedCell:
- haveUnboxedCellInRegisterFile = true;
+ case AlreadyInJSStackAsUnboxedCell:
+ haveUnboxedCellInJSStack = true;
break;
- case AlreadyInRegisterFileAsUnboxedBoolean:
- haveUnboxedBooleanInRegisterFile = true;
+ case AlreadyInJSStackAsUnboxedBoolean:
+ haveUnboxedBooleanInJSStack = true;
break;
case InFPR:
haveUndefined = true;
break;
+ case ArgumentsThatWereNotCreated:
+ haveArguments = true;
+ break;
+
default:
break;
}
}
unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
- ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
+ ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
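+    // Sizing: one EncodedJSValue slot per poisoned register, plus one per displaced
+    // register when their payload/tag pairs cannot all be held in GPRs at once
+    // (each pair takes two registers), plus two extra slots if UInt32 recoveries
+    // need spill space.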
// From here on, the code assumes that it is profitable to maximize the distance
// 5) Perform all reboxing of integers and cells, except for those in registers.
- if (haveUnboxedInt32InRegisterFile || haveUnboxedCellInRegisterFile || haveUnboxedBooleanInRegisterFile) {
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (haveUnboxedInt32InJSStack || haveUnboxedCellInJSStack || haveUnboxedBooleanInJSStack) {
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
switch (recovery.technique()) {
- case AlreadyInRegisterFileAsUnboxedInt32:
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
+ case AlreadyInJSStackAsUnboxedInt32:
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
break;
- case AlreadyInRegisterFileAsUnboxedCell:
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
+ case AlreadyInJSStackAsUnboxedCell:
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
break;
- case AlreadyInRegisterFileAsUnboxedBoolean:
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
+ case AlreadyInJSStackAsUnboxedBoolean:
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
break;
default:
    // Note that GPRs do not have a fast check (like haveFPRs) because we expect that
// most OSR failure points will have at least one GPR that needs to be dumped.
- initializePoisoned(exit.m_variables.size());
+ initializePoisoned(operands.numberOfLocals());
unsigned currentPoisonIndex = 0;
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
- int operand = exit.operandForIndex(index);
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+ int operand = operands.operandForIndex(index);
switch (recovery.technique()) {
case InGPR:
case UnboxedInt32InGPR:
case UnboxedBooleanInGPR:
- if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else {
uint32_t tag = JSValue::EmptyValueTag;
}
break;
case InPair:
- if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else {
m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
- if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR);
m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
} else
positive.link(&m_jit);
- if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
} else {
m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
m_jit.loadPtr(myScratch, addressGPR);
- if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
- m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
+ m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
}
break;
}
}
- // 7) Dump all doubles into the register file, or to the scratch storage if the
+ // 7) Dump all doubles into the stack, or to the scratch storage if the
// destination virtual register is poisoned.
if (haveFPRs) {
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
if (recovery.technique() != InFPR)
continue;
- if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex);
- m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+ m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else
- m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
}
}
// that is far from guaranteed.
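+    // Fast case: all displaced values fit in GPRs simultaneously (each JSValue
+    // needs two registers here, payload and tag). Load everything first, then
+    // store everything, so no stack slot is overwritten before it has been read.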
unsigned displacementIndex = 0;
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
switch (recovery.technique()) {
- case DisplacedInRegisterFile:
+ case DisplacedInJSStack:
m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
break;
- case Int32DisplacedInRegisterFile:
+ case Int32DisplacedInJSStack:
m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++));
break;
- case CellDisplacedInRegisterFile:
+ case CellDisplacedInJSStack:
m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++));
break;
- case BooleanDisplacedInRegisterFile:
+ case BooleanDisplacedInJSStack:
m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++));
break;
}
displacementIndex = 0;
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
switch (recovery.technique()) {
- case DisplacedInRegisterFile:
- case Int32DisplacedInRegisterFile:
- case CellDisplacedInRegisterFile:
- case BooleanDisplacedInRegisterFile:
- m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
- m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ case DisplacedInJSStack:
+ case Int32DisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
+ m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
break;
default:
break;
// to their new (old JIT) locations.
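+    // Slow case: too many displaced values to hold in registers at once, so copy
+    // each one into the scratch buffer first, then write them all back out to
+    // their final stack slots.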
unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
switch (recovery.technique()) {
- case DisplacedInRegisterFile:
+ case DisplacedInJSStack:
m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
scratchIndex++;
break;
- case Int32DisplacedInRegisterFile:
- case CellDisplacedInRegisterFile:
- case BooleanDisplacedInRegisterFile:
+ case Int32DisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
break;
}
scratchIndex = numberOfPoisonedVirtualRegisters;
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
switch (recovery.technique()) {
- case DisplacedInRegisterFile:
+ case DisplacedInJSStack:
m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
- m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
- m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
scratchIndex++;
break;
- case Int32DisplacedInRegisterFile:
+ case Int32DisplacedInJSStack:
m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
- m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
break;
- case CellDisplacedInRegisterFile:
+ case CellDisplacedInJSStack:
m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
- m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
break;
- case BooleanDisplacedInRegisterFile:
+ case BooleanDisplacedInJSStack:
m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
- m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
break;
default:
break;
// 9) Dump all poisoned virtual registers.
if (numberOfPoisonedVirtualRegisters) {
- for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
+ for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
if (!poisonedVirtualRegisters[virtualRegister])
continue;
- const ValueRecovery& recovery = exit.m_variables[virtualRegister];
+ const ValueRecovery& recovery = operands.local(virtualRegister);
switch (recovery.technique()) {
case InGPR:
case UnboxedInt32InGPR:
m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
}
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
- const ValueRecovery& recovery = exit.valueRecovery(index);
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
if (recovery.technique() != Constant)
continue;
if (recovery.constant().isUndefined()) {
- m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
- m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
} else {
- m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
- m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
}
}
}
- // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // 12) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
// that all new calls into this code will go to the new JIT, so the execute
// counter only affects call frames that performed OSR exit and call frames
// that were still executing the old JIT at the time of another call frame's
handleExitCounts(exit);
- // 12) Load the result of the last bytecode operation into regT0.
-
- if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
- m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
- m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
- }
-
- // 13) Fix call frame (s).
+ // 13) Reify inlined call frames.
ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));
for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
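+        // The caller resumes at the instruction after its call, hence the
+        // OPCODE_LENGTH(op_call) adjustment to the bytecode index.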
- BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
ASSERT(mapping);
ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);
} else
callerFrameGPR = GPRInfo::callFrameRegister;
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
- m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
- m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
- m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
+ if (!inlineCallFrame->isClosureCall())
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
+ m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
+ if (!inlineCallFrame->isClosureCall())
+ m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
+ }
+
+ // 14) Create arguments if necessary and place them into the appropriate aliased
+ // registers.
+
+ if (haveArguments) {
+ HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
+ NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;
+
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+ if (recovery.technique() != ArgumentsThatWereNotCreated)
+ continue;
+ int operand = operands.operandForIndex(index);
+ // Find the right inline call frame.
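+            // Walk from the innermost frame outward; the first frame whose base
+            // (stackOffset) is at or below the operand is the frame that owns it.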
+ InlineCallFrame* inlineCallFrame = 0;
+ for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
+ current;
+ current = current->caller.inlineCallFrame) {
+ if (current->stackOffset <= operand) {
+ inlineCallFrame = current;
+ break;
+ }
+ }
+
+ if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
+ continue;
+ int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
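+            // Create each frame's arguments object at most once; later recoveries
+            // for the same frame just reload it from its arguments register below.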
+ if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
+ // We know this call frame optimized out an arguments object that
+ // the baseline JIT would have created. Do that creation now.
+ if (inlineCallFrame) {
+ m_jit.setupArgumentsWithExecState(
+ AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateInlinedArguments)),
+ GPRInfo::nonArgGPR0);
+ } else {
+ m_jit.setupArgumentsExecState();
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateArguments)),
+ GPRInfo::nonArgGPR0);
+ }
+ m_jit.call(GPRInfo::nonArgGPR0);
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(argumentsRegister));
+ m_jit.store32(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::payloadFor(argumentsRegister));
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.store32(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
+ }
+
+ m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(operand));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
+ }
}
+ // 15) Load the result of the last bytecode operation into regT0.
+
+ if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
+ m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
+ m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
+ }
+
+ // 16) Adjust the call frame pointer.
+
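+    // If the exit is in inlined code, advance the frame register by the inline
+    // frame's offset so it points at the inlined call frame rather than the
+    // outermost machine frame.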
if (exit.m_codeOrigin.inlineCallFrame)
m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
- // 14) Jump into the corresponding baseline JIT code.
+ // 17) Jump into the corresponding baseline JIT code.
CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);
- BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
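+    // The mapping translates the exit's bytecode index into an offset in the
+    // baseline CodeBlock's machine code; that offset becomes the jump target.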
ASSERT(mapping);
ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
m_jit.jump(GPRInfo::regT2);
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog(" -> %p\n", jumpTarget);
+ dataLogF(" -> %p\n", jumpTarget);
#endif
}