X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/a253471d7f8e4d91bf6ebabab00155c3b387d3d0..93a3786624b2768d89bfa27e46598dc64e2fb70a:/dfg/DFGOSRExitCompiler64.cpp

diff --git a/dfg/DFGOSRExitCompiler64.cpp b/dfg/DFGOSRExitCompiler64.cpp
index 6f116e4..b467879 100644
--- a/dfg/DFGOSRExitCompiler64.cpp
+++ b/dfg/DFGOSRExitCompiler64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -29,30 +29,32 @@
 #if ENABLE(DFG_JIT) && USE(JSVALUE64)
 
 #include "DFGOperations.h"
+#include "Operations.h"
+#include <wtf/DataLog.h>
 
 namespace JSC { namespace DFG {
 
-void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
+void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
 {
     // 1) Pro-forma stuff.
 #if DFG_ENABLE(DEBUG_VERBOSE)
-    dataLog("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
+    dataLogF("OSR exit for (");
     for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
-        dataLog("bc#%u", codeOrigin.bytecodeIndex);
+        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
         if (!codeOrigin.inlineCallFrame)
             break;
-        dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
+        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
     }
-    dataLog(") ");
-    exit.dump(WTF::dataFile());
-#endif
-#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
-    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
-    debugInfo->codeBlock = m_jit.codeBlock();
-    debugInfo->nodeIndex = exit.m_nodeIndex;
-
-    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
+    dataLogF(") ");
+    dumpOperands(operands, WTF::dataFile());
 #endif
+
+    if (Options::printEachOSRExit()) {
+        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
+        debugInfo->codeBlock = m_jit.codeBlock();
+
+        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
+    }
 
 #if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
     m_jit.breakpoint();
@@ -72,12 +74,12 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
         switch (recovery->type()) {
         case SpeculativeAdd:
             m_jit.sub32(recovery->src(), recovery->dest());
-            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
+            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
             alreadyBoxed = recovery->dest();
             break;
 
         case BooleanSpeculationCheck:
-            m_jit.xorPtr(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
+            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
             break;
 
         default:
@@ -85,23 +87,76 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
         }
     }
 
-    // 3) Refine some value profile, if appropriate.
-
-    if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
-        EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
-
-#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
-        dataLog(" (have exit profile, bucket %p) ", bucket);
+    // 3) Refine some array and/or value profile, if appropriate.
+
+    if (!!exit.m_jsValueSource) {
+        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+            // If the instruction that this originated from has an array profile, then
+            // refine it. If it doesn't, then do nothing. The latter could happen for
+            // hoisted checks, or checks emitted for operations that didn't have array
+            // profiling - either ops that aren't array accesses at all, or weren't
+            // known to be array accesses in the bytecode. The latter case is a FIXME
+            // while the former case is an outcome of a CheckStructure not knowing why
+            // it was emitted (could be either due to an inline cache of a property
+            // access, or due to an array profile).
+
+            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
+                GPRReg usedRegister;
+                if (exit.m_jsValueSource.isAddress())
+                    usedRegister = exit.m_jsValueSource.base();
+                else
+                    usedRegister = exit.m_jsValueSource.gpr();
+
+                GPRReg scratch1;
+                GPRReg scratch2;
+                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
+                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
+
+#if CPU(ARM64)
+                m_jit.pushToSave(scratch1);
+                m_jit.pushToSave(scratch2);
+#else
+                m_jit.push(scratch1);
+                m_jit.push(scratch2);
 #endif
+
+                GPRReg value;
+                if (exit.m_jsValueSource.isAddress()) {
+                    value = scratch1;
+                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
+                } else
+                    value = exit.m_jsValueSource.gpr();
+
+                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
+                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
+                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
+                m_jit.lshift32(scratch1, scratch2);
+                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+
+#if CPU(ARM64)
+                m_jit.popToRestore(scratch2);
+                m_jit.popToRestore(scratch1);
+#else
+                m_jit.pop(scratch2);
+                m_jit.pop(scratch1);
+#endif
+            }
+        }
+
+        if (!!exit.m_valueProfile) {
+            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
 
-        if (exit.m_jsValueSource.isAddress()) {
-            // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
-            // since we know how to restore it.
-            m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
-            m_jit.storePtr(GPRInfo::tagTypeNumberRegister, bucket);
-            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(TagTypeNumber)), GPRInfo::tagTypeNumberRegister);
-        } else
-            m_jit.storePtr(exit.m_jsValueSource.gpr(), bucket);
+            if (exit.m_jsValueSource.isAddress()) {
+                // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
+                // since we know how to restore it.
+                m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
+                m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
+                m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
+            } else
+                m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
+        }
     }
 
     // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
@@ -110,7 +165,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
     // GPRInfo::numberOfRegisters of them. Also see if there are any constants,
     // any undefined slots, any FPR slots, and any unboxed ints.
-    Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
+    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
     for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
         poisonedVirtualRegisters[i] = false;
 
@@ -127,13 +182,14 @@
     bool haveConstants = false;
     bool haveUndefined = false;
     bool haveUInt32s = false;
+    bool haveArguments = false;
 
-    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-        const ValueRecovery& recovery = exit.valueRecovery(index);
+    for (size_t index = 0; index < operands.size(); ++index) {
+        const ValueRecovery& recovery = operands[index];
         switch (recovery.technique()) {
-        case Int32DisplacedInRegisterFile:
-        case DoubleDisplacedInRegisterFile:
-        case DisplacedInRegisterFile:
+        case Int32DisplacedInJSStack:
+        case DoubleDisplacedInJSStack:
+        case DisplacedInJSStack:
             numberOfDisplacedVirtualRegisters++;
             ASSERT((int)recovery.virtualRegister() >= 0);
 
@@ -144,8 +200,8 @@
             // to ensure this happens efficiently. Note that we expect this case
             // to be rare, so the handling of it is optimized for the cases in
             // which it does not happen.
-            if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
-                switch (exit.m_variables[recovery.virtualRegister()].technique()) {
+            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
+                switch (operands.local(recovery.virtualRegister()).technique()) {
                 case InGPR:
                 case UnboxedInt32InGPR:
                 case UInt32InGPR:
@@ -162,11 +218,11 @@
             break;
 
         case UnboxedInt32InGPR:
-        case AlreadyInRegisterFileAsUnboxedInt32:
+        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;
 
-        case AlreadyInRegisterFileAsUnboxedDouble:
+        case AlreadyInJSStackAsUnboxedDouble:
            haveUnboxedDoubles = true;
            break;
 
@@ -184,33 +240,37 @@
             haveUndefined = true;
             break;
 
+        case ArgumentsThatWereNotCreated:
+            haveArguments = true;
+            break;
+
         default:
             break;
         }
     }
 
 #if DFG_ENABLE(DEBUG_VERBOSE)
-    dataLog(" ");
+    dataLogF(" ");
     if (numberOfPoisonedVirtualRegisters)
-        dataLog("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
+        dataLogF("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
     if (numberOfDisplacedVirtualRegisters)
-        dataLog("Displaced=%u ", numberOfDisplacedVirtualRegisters);
+        dataLogF("Displaced=%u ", numberOfDisplacedVirtualRegisters);
     if (haveUnboxedInt32s)
-        dataLog("UnboxedInt32 ");
+        dataLogF("UnboxedInt32 ");
     if (haveUnboxedDoubles)
-        dataLog("UnboxedDoubles ");
+        dataLogF("UnboxedDoubles ");
     if (haveUInt32s)
-        dataLog("UInt32 ");
+        dataLogF("UInt32 ");
     if (haveFPRs)
-        dataLog("FPR ");
+        dataLogF("FPR ");
     if (haveConstants)
-        dataLog("Constants ");
+        dataLogF("Constants ");
     if (haveUndefined)
-        dataLog("Undefined ");
-    dataLog(" ");
+        dataLogF("Undefined ");
+    dataLogF(" ");
 #endif
 
-    ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
+    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
     EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
 
     // From here on, the code assumes that it is profitable to maximize the distance
@@ -219,16 +279,16 @@
     // 5) Perform all reboxing of integers.
 
     if (haveUnboxedInt32s || haveUInt32s) {
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
             switch (recovery.technique()) {
             case UnboxedInt32InGPR:
                 if (recovery.gpr() != alreadyBoxed)
-                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
+                    m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                 break;
 
-            case AlreadyInRegisterFileAsUnboxedInt32:
-                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
+            case AlreadyInJSStackAsUnboxedInt32:
+                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                 break;
 
             case UInt32InGPR: {
@@ -246,7 +306,7 @@
                 if (addressGPR == recovery.gpr())
                     addressGPR = GPRInfo::regT1;
 
-                m_jit.storePtr(addressGPR, scratchDataBuffer);
+                m_jit.store64(addressGPR, scratchDataBuffer);
                 m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
                 m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
@@ -260,12 +320,12 @@
                 positive.link(&m_jit);
 
-                m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
+                m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
 
                 done.link(&m_jit);
 
                 m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
-                m_jit.loadPtr(scratchDataBuffer, addressGPR);
+                m_jit.load64(scratchDataBuffer, addressGPR);
                 break;
             }
 
@@ -279,22 +339,22 @@
     // Note that GPRs do not have a fast check (like haveFPRs) because we expect that
     // most OSR failure points will have at least one GPR that needs to be dumped.
-    initializePoisoned(exit.m_variables.size());
+    initializePoisoned(operands.numberOfLocals());
     unsigned currentPoisonIndex = 0;
 
-    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-        const ValueRecovery& recovery = exit.valueRecovery(index);
-        int operand = exit.operandForIndex(index);
+    for (size_t index = 0; index < operands.size(); ++index) {
+        const ValueRecovery& recovery = operands[index];
+        int operand = operands.operandForIndex(index);
         switch (recovery.technique()) {
         case InGPR:
         case UnboxedInt32InGPR:
         case UInt32InGPR:
-            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
-                m_jit.storePtr(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
-                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
+                m_jit.store64(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
+                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                 currentPoisonIndex++;
             } else
-                m_jit.storePtr(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
+                m_jit.store64(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
             break;
         default:
             break;
@@ -306,8 +366,8 @@
     if (haveFPRs) {
         // 7) Box all doubles (relies on there being more GPRs than FPRs)
 
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
             if (recovery.technique() != InFPR)
                 continue;
             FPRReg fpr = recovery.fpr();
@@ -315,34 +375,34 @@
             m_jit.boxDouble(fpr, gpr);
         }
 
-        // 8) Dump all doubles into the register file, or to the scratch storage if
+        // 8) Dump all doubles into the stack, or to the scratch storage if
         //    the destination virtual register is poisoned.
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
             if (recovery.technique() != InFPR)
                 continue;
             GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
-            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
-                m_jit.storePtr(gpr, scratchDataBuffer + currentPoisonIndex);
-                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
+            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
+                m_jit.store64(gpr, scratchDataBuffer + currentPoisonIndex);
+                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                 currentPoisonIndex++;
             } else
-                m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+                m_jit.store64(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
         }
     }
 
     // At this point all GPRs and FPRs are available for scratch use.
 
-    // 9) Box all unboxed doubles in the register file.
+    // 9) Box all unboxed doubles in the stack.
     if (haveUnboxedDoubles) {
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
-            if (recovery.technique() != AlreadyInRegisterFileAsUnboxedDouble)
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
+            if (recovery.technique() != AlreadyInJSStackAsUnboxedDouble)
                 continue;
-            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)), FPRInfo::fpRegT0);
+            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0);
             m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
-            m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
         }
     }
 
@@ -358,24 +418,24 @@
         //    that is far from guaranteed.
 
         unsigned displacementIndex = 0;
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
             switch (recovery.technique()) {
-            case DisplacedInRegisterFile:
-                m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+            case DisplacedInJSStack:
+                m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                 break;
 
-            case Int32DisplacedInRegisterFile: {
+            case Int32DisplacedInJSStack: {
                 GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                 m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
-                m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+                m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
                 break;
             }
 
-            case DoubleDisplacedInRegisterFile: {
+            case DoubleDisplacedInJSStack: {
                 GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
-                m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
-                m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+                m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
+                m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
                 break;
             }
 
@@ -385,13 +445,13 @@
         }
 
         displacementIndex = 0;
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
             switch (recovery.technique()) {
-            case DisplacedInRegisterFile:
-            case Int32DisplacedInRegisterFile:
-            case DoubleDisplacedInRegisterFile:
-                m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+            case DisplacedInJSStack:
+            case Int32DisplacedInJSStack:
+            case DoubleDisplacedInJSStack:
+                m_jit.store64(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                 break;
 
             default:
@@ -417,26 +477,26 @@
         //    to their new (old JIT) locations.
         unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
             switch (recovery.technique()) {
-            case DisplacedInRegisterFile:
-                m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
-                m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+            case DisplacedInJSStack:
+                m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+                m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                 break;
 
-            case Int32DisplacedInRegisterFile: {
+            case Int32DisplacedInJSStack: {
                 m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
-                m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
-                m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+                m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+                m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                 break;
             }
 
-            case DoubleDisplacedInRegisterFile: {
-                m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
-                m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
-                m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+            case DoubleDisplacedInJSStack: {
+                m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+                m_jit.sub64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+                m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                 break;
             }
 
@@ -446,14 +506,14 @@
         }
 
         scratchIndex = numberOfPoisonedVirtualRegisters;
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
             switch (recovery.technique()) {
-            case DisplacedInRegisterFile:
-            case Int32DisplacedInRegisterFile:
-            case DoubleDisplacedInRegisterFile:
-                m_jit.loadPtr(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
-                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+            case DisplacedInJSStack:
+            case Int32DisplacedInJSStack:
+            case DoubleDisplacedInJSStack:
+                m_jit.load64(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
+                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                 break;
 
             default:
@@ -468,18 +528,18 @@
     // 11) Dump all poisoned virtual registers.
     if (numberOfPoisonedVirtualRegisters) {
-        for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
+        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
             if (!poisonedVirtualRegisters[virtualRegister])
                 continue;
 
-            const ValueRecovery& recovery = exit.m_variables[virtualRegister];
+            const ValueRecovery& recovery = operands.local(virtualRegister);
             switch (recovery.technique()) {
             case InGPR:
             case UnboxedInt32InGPR:
             case UInt32InGPR:
             case InFPR:
-                m_jit.loadPtr(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
-                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
+                m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
+                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                 break;
 
             default:
@@ -493,16 +553,16 @@
 
     if (haveConstants) {
         if (haveUndefined)
-            m_jit.move(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0);
+            m_jit.move(AssemblyHelpers::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
 
-        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
-            const ValueRecovery& recovery = exit.valueRecovery(index);
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
             if (recovery.technique() != Constant)
                 continue;
             if (recovery.constant().isUndefined())
-                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
             else
-                m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
+                m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
         }
     }
 
@@ -544,15 +604,10 @@
 
     handleExitCounts(exit);
 
-    // 14) Load the result of the last bytecode operation into regT0.
-
-    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
-        m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
-
-    // 15) Fix call frame(s).
+    // 14) Reify inlined call frames.
     ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
-    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));
+    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));
 
     for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
         InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
@@ -560,7 +615,7 @@
         CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
         Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
         unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
-        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);
+        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
 
         ASSERT(mapping);
         ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);
@@ -574,23 +629,83 @@
         } else
             callerFrameGPR = GPRInfo::callFrameRegister;
 
-        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
-        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
-        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
-        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
-        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
-        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
+        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
+        if (!inlineCallFrame->isClosureCall())
+            m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
+        m_jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
+        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
+        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
+        if (!inlineCallFrame->isClosureCall())
+            m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
     }
 
+    // 15) Create arguments if necessary and place them into the appropriate aliased
+    //     registers.
+
+    if (haveArguments) {
+        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
+            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;
+
+        for (size_t index = 0; index < operands.size(); ++index) {
+            const ValueRecovery& recovery = operands[index];
+            if (recovery.technique() != ArgumentsThatWereNotCreated)
+                continue;
+            int operand = operands.operandForIndex(index);
+            // Find the right inline call frame.
+            InlineCallFrame* inlineCallFrame = 0;
+            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
+                 current;
+                 current = current->caller.inlineCallFrame) {
+                if (current->stackOffset <= operand) {
+                    inlineCallFrame = current;
+                    break;
+                }
+            }
+
+            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
+                continue;
+            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
+            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
+                // We know this call frame optimized out an arguments object that
+                // the baseline JIT would have created. Do that creation now.
+                if (inlineCallFrame) {
+                    m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
+                    m_jit.setupArguments(GPRInfo::regT0);
+                } else
+                    m_jit.setupArgumentsExecState();
+                m_jit.move(
+                    AssemblyHelpers::TrustedImmPtr(
+                        bitwise_cast<void*>(operationCreateArguments)),
+                    GPRInfo::nonArgGPR0);
+                m_jit.call(GPRInfo::nonArgGPR0);
+                m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
+                m_jit.store64(
+                    GPRInfo::returnValueGPR,
+                    AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
+                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
+            }
+
+            m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
+            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+        }
+    }
+
+    // 16) Load the result of the last bytecode operation into regT0.
+
+    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
+        m_jit.load64(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
+
+    // 17) Adjust the call frame pointer.
+
     if (exit.m_codeOrigin.inlineCallFrame)
         m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
 
-    // 16) Jump into the corresponding baseline JIT code.
+    // 18) Jump into the corresponding baseline JIT code.
 
     CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
     Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);
 
-    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);
+    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
 
     ASSERT(mapping);
     ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
@@ -604,7 +719,7 @@
     m_jit.jump(GPRInfo::regT1);
 
 #if DFG_ENABLE(DEBUG_VERBOSE)
-    dataLog("-> %p\n", jumpTarget);
+    dataLogF("-> %p\n", jumpTarget);
 #endif
 }
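
Note on step 3 (array-profile refinement): the emitted sequence loads the cell's Structure, records it as the last-seen structure, then sets one bit per observed IndexingType in the profile's arrayModes word. The following standalone C++ sketch restates that semantics; it is illustrative only, and ArrayProfileSketch is a simplified stand-in for the real JSC ArrayProfile.

#include <cstdint>

// What the JITted sequence in step 3 computes: remember the structure pointer,
// then OR (1 << indexingType) into a bitset of observed array modes.
struct ArrayProfileSketch {
    const void* lastSeenStructure;
    uint32_t arrayModes;

    void observe(const void* structure, uint8_t indexingType)
    {
        lastSeenStructure = structure;    // storePtr(scratch1, addressOfLastSeenStructure())
        arrayModes |= 1u << indexingType; // move(1, scratch2); lshift32(scratch1, scratch2); or32(scratch2, addressOfArrayModes())
    }
};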
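Note on the or64/sub64 lines against tagTypeNumberRegister: that register permanently holds TagTypeNumber (0xffff000000000000), the anchor of the JSVALUE64 encoding. An int32 is boxed by OR-ing the tag in; raw double bits are boxed by subtracting the tag, which modulo 2^64 equals adding 2^48, keeping boxed doubles disjoint from pointers (top 16 bits all zero) and boxed int32s (top 16 bits all one), given JSC's NaN sanitization. A minimal sketch of that arithmetic (helper names are illustrative, not JSC API):

#include <cstdint>
#include <cstring>

static const uint64_t TagTypeNumber = 0xffff000000000000ull;

// or64(tagTypeNumberRegister, gpr): box an int32.
uint64_t boxInt32(int32_t value)
{
    return TagTypeNumber | static_cast<uint32_t>(value);
}

// sub64(tagTypeNumberRegister, gpr): box raw double bits;
// subtracting TagTypeNumber equals adding 2^48 modulo 2^64.
uint64_t boxDouble(double value)
{
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    return bits - TagTypeNumber;
}

// The tag tests implied by the encoding.
bool isInt32(uint64_t boxed) { return (boxed & TagTypeNumber) == TagTypeNumber; }
bool isNumber(uint64_t boxed) { return boxed & TagTypeNumber; }

This is also why the AlreadyInJSStackAsUnboxedInt32 case can rebox in place with a single store32 of TagTypeNumber >> 32 (0xffff0000) into the slot's tag half, without touching the payload.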
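Note on step 10: a DisplacedInJSStack recovery copies one stack slot to another, and a destination may itself be another recovery's source. Rather than ordering the moves, the compiler reads every source before performing any write, staging through GPRs when all displaced values fit in registers and through the scratch buffer otherwise. The same idea in miniature (Move and the int64_t stack are illustrative stand-ins, not JSC types):

#include <cstdint>
#include <vector>

struct Move { size_t src; size_t dst; }; // copy stack[src] -> stack[dst]

// Phase 1 reads all sources (the role played by GPRs or the scratch buffer);
// phase 2 writes all destinations. No write can clobber an unread source.
void performDisplacedMoves(std::vector<int64_t>& stack, const std::vector<Move>& moves)
{
    std::vector<int64_t> staged;
    staged.reserve(moves.size());
    for (const Move& move : moves)
        staged.push_back(stack[move.src]);
    for (size_t i = 0; i < moves.size(); ++i)
        stack[moves[i].dst] = staged[i];
}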