/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#if ENABLE(DFG_JIT)
#include "CallFrame.h"
+#include "DFGCommon.h"
+#include "DFGJITCode.h"
+#include "DFGOSRExitPreparation.h"
#include "LinkBuffer.h"
+#include "OperandsInlines.h"
+#include "JSCInlines.h"
#include "RepatchBuffer.h"
+#include <wtf/StringPrintStream.h>
namespace JSC { namespace DFG {
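+
+// The DFG elides the allocation of arguments objects it can prove unobservable,
+// recording only a phantom node. When an OSR exit needs such an object after all,
+// this routine materializes it: each operand whose recovery technique is
+// DirectArgumentsThatWereNotCreated or ClonedArgumentsThatWereNotCreated gets a
+// freshly allocated object, with a single allocation shared by all operands that
+// refer to the same node.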
+void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands)
+{
+    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
+    for (size_t index = 0; index < operands.size(); ++index) {
+        const ValueRecovery& recovery = operands[index];
+        int operand = operands.operandForIndex(index);
+
+        if (recovery.technique() != DirectArgumentsThatWereNotCreated
+            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
+            continue;
+
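+        // Multiple operands may refer to the same phantom node; if we already
+        // materialized its arguments object, just copy the cell we stored earlier.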
+        MinifiedID id = recovery.nodeID();
+        auto iter = alreadyAllocatedArguments.find(id);
+        if (iter != alreadyAllocatedArguments.end()) {
+            JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
+            m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
+            m_jit.storeValue(regs, CCallHelpers::addressFor(operand));
+            continue;
+        }
+
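+        // Ask the minified graph which inline call frame (if any) these phantom
+        // arguments belong to.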
+        InlineCallFrame* inlineCallFrame =
+            m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();
+
+        int stackOffset;
+        if (inlineCallFrame)
+            stackOffset = inlineCallFrame->stackOffset;
+        else
+            stackOffset = 0;
+
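+        // Materialization needs the callee in regT0. Closure calls (and the machine
+        // frame) must read it off the stack; otherwise it is a compile-time constant.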
+        if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
+            m_jit.loadPtr(
+                AssemblyHelpers::addressFor(stackOffset + JSStack::Callee),
+                GPRInfo::regT0);
+        } else {
+            m_jit.move(
+                AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
+                GPRInfo::regT0);
+        }
+
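+        // It also needs the argument count in regT1. Varargs frames (and the machine
+        // frame) keep the count on the stack; otherwise it is known statically.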
+        if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
+            m_jit.load32(
+                AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount),
+                GPRInfo::regT1);
+        } else {
+            m_jit.move(
+                AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
+                GPRInfo::regT1);
+        }
+
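+        // Call the matching runtime helper and store the resulting cell into the
+        // operand's stack slot.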
+        m_jit.setupArgumentsWithExecState(
+            AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
+        switch (recovery.technique()) {
+        case DirectArgumentsThatWereNotCreated:
+            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
+            break;
+        case ClonedArgumentsThatWereNotCreated:
+            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+        m_jit.call(GPRInfo::nonArgGPR0);
+        m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
+
+        alreadyAllocatedArguments.add(id, operand);
+    }
+}
+
extern "C" {
void compileOSRExit(ExecState* exec)
{
+    SamplingRegion samplingRegion("DFG OSR Exit Compilation");
+
    CodeBlock* codeBlock = exec->codeBlock();
    ASSERT(codeBlock);
-    ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
-    JSGlobalData* globalData = &exec->globalData();
+    VM* vm = &exec->vm();
-    uint32_t exitIndex = globalData->osrExitIndex;
-    OSRExit& exit = codeBlock->osrExit(exitIndex);
+    // Prefer not to GC while we're in here; a collection at this point wouldn't
+    // really be profitable anyway.
+    DeferGCForAWhile deferGC(vm->heap);
+
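+    // The OSR exit that fired recorded its index in the VM before jumping here.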
+    uint32_t exitIndex = vm->osrExitIndex;
+    OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];
-    // Make sure all code on our inline stack is JIT compiled. This is necessary since
-    // we may opt to inline a code block even before it had ever been compiled by the
-    // JIT, but our OSR exit infrastructure currently only works if the target of the
-    // OSR exit is JIT code. This could be changed since there is nothing particularly
-    // hard about doing an OSR exit into the interpreter, but for now this seems to make
-    // sense in that if we're OSR exiting from inlined code of a DFG code block, then
-    // probably it's a good sign that the thing we're exiting into is hot. Even more
-    // interestingly, since the code was inlined, it may never otherwise get JIT
-    // compiled since the act of inlining it may ensure that it otherwise never runs.
-    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
-        static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())
-            ->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct)
-            ->jitCompile(*globalData);
-    }
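+    // Make sure all code on our inline stack is JIT compiled, since the OSR exit
+    // infrastructure needs machine code targets to jump to.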
+    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
+
+    // Compute the value recoveries by replaying the variable event stream up to
+    // the point of this exit.
+    Operands<ValueRecovery> operands;
+    codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
    SpeculationRecovery* recovery = 0;
-    if (exit.m_recoveryIndex)
-        recovery = &codeBlock->speculationRecovery(exit.m_recoveryIndex - 1);
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
-    dataLog("Generating OSR exit #%u (bc#%u, @%u, %s) for code block %p.\n", exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, exitKindToString(exit.m_kind), codeBlock);
-#endif
+    if (exit.m_recoveryIndex != UINT_MAX)
+        recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];
    {
-        AssemblyHelpers jit(globalData, codeBlock);
+        CCallHelpers jit(vm, codeBlock);
        OSRExitCompiler exitCompiler(jit);
        jit.jitAssertHasValidCallFrame();
-        exitCompiler.compileExit(exit, recovery);
-        LinkBuffer patchBuffer(*globalData, &jit, codeBlock);
-        exit.m_code = patchBuffer.finalizeCode();
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLog("OSR exit code at [%p, %p).\n", patchBuffer.debugAddress(), static_cast<char*>(patchBuffer.debugAddress()) + patchBuffer.debugSize());
-#endif
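+        // If the per-bytecode profiler is active, register this exit with it and
+        // plant code that counts how often the exit is taken.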
+        if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
+            Profiler::Database& database = *vm->m_perBytecodeProfiler;
+            Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
+
+            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
+                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
+                exit.m_kind, exit.m_kind == UncountableInvalidation);
+            jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
+        }
+
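+        // Generate the exit ramp itself: restore the values described by the
+        // recoveries into the reconstructed baseline frame(s).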
+        exitCompiler.compileExit(exit, operands, recovery);
+
+        LinkBuffer patchBuffer(*vm, jit, codeBlock);
+        exit.m_code = FINALIZE_CODE_IF(
+            shouldShowDisassembly() || Options::verboseOSR(),
+            patchBuffer,
+            ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
+                exitIndex, toCString(exit.m_codeOrigin).data(),
+                exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
+                toCString(ignoringContext<DumpContext>(operands)).data()));
    }
    {
        RepatchBuffer repatchBuffer(codeBlock);
-        repatchBuffer.relink(exit.m_check.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
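+        // Repoint the speculation check at the freshly compiled ramp so that future
+        // failures of this check jump straight to it instead of recompiling.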
+        repatchBuffer.relink(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
    }
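+    // Finally, tell the code that called us where to jump this time around.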
-    globalData->osrExitJumpDestination = exit.m_code.code().executableAddress();
+    vm->osrExitJumpDestination = exit.m_code.code().executableAddress();
}
} // extern "C"
-void OSRExitCompiler::handleExitCounts(const OSRExit& exit)
-{
-    m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
-
-    m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
-
-    AssemblyHelpers::JumpList tooFewFails;
-
-    if (exit.m_kind == InadequateCoverage) {
-        // Proceed based on the assumption that we can profitably optimize this code once
-        // it has executed enough times.
-
-        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()), GPRInfo::regT2);
-        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
-        m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
-        m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
-        m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()));
-        m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
-
-        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(Options::forcedOSRExitCountForReoptimization)));
-    } else {
-        // Proceed based on the assumption that we can handle these exits so long as they
-        // don't get too frequent.
-
-        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
-        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
-        m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
-        m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
-        m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
-        m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
-
-        m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
-
-        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold())));
-        m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
-
-        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1));
-    }
-
-    // Reoptimize as soon as possible.
-    m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
-    m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
-    AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
-
-    tooFewFails.link(&m_jit);
-
-    // Adjust the execution counter such that the target is to only optimize after a while.
-    int32_t targetValue =
-        ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
-            m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
-            m_jit.baselineCodeBlock());
-    m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
-    m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
-    m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
-
-    doneAdjusting.link(&m_jit);
-}
-
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)