/*
- * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
, m_blockHeads(dfg.numBlocks())
{
if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
- m_disassembler = adoptPtr(new Disassembler(dfg));
+ m_disassembler = std::make_unique<Disassembler>(dfg);
}
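
The adoptPtr(new ...) → std::make_unique change recurs throughout this patch. A minimal sketch of the ownership pattern, using a stand-in Widget type rather than the real Disassembler:

```cpp
#include <memory>

struct Widget {
    explicit Widget(int value) : m_value(value) { }
    int m_value;
};

int main()
{
    // Before (WebKit style): m_widget = adoptPtr(new Widget(42));
    // After: one expression, exception-safe, no naked 'new'.
    std::unique_ptr<Widget> widget = std::make_unique<Widget>(42);
    return widget->m_value == 42 ? 0 : 1;
}
```
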
JITCompiler::~JITCompiler()
void JITCompiler::compileExceptionHandlers()
{
- if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
- return;
-
- Jump doLookup;
-
if (!m_exceptionChecksWithCallFrameRollback.empty()) {
m_exceptionChecksWithCallFrameRollback.link(this);
- emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
- doLookup = jump();
- }
- if (!m_exceptionChecks.empty())
- m_exceptionChecks.link(this);
- // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- if (doLookup.isSet())
- doLookup.link(this);
- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
+ jumpToExceptionHandler();
+ }
+ if (!m_exceptionChecks.empty()) {
+ m_exceptionChecks.link(this);
+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
#endif
- m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
- jumpToExceptionHandler();
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
+
+ jumpToExceptionHandler();
+ }
}
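
The rewrite above drops the shared doLookup tail: each check list now calls a different C helper, so each path marshals its own (VM*, CallFrame*) arguments. A simplified model of that control flow, with stand-in types rather than JSC code:

```cpp
#include <cstdio>

struct VM { };
struct CallFrame { };

// Stand-ins for the two C helpers named in the patch.
void lookupExceptionHandler(VM*, CallFrame*) { std::puts("unwind current frame"); }
void lookupExceptionHandlerFromCallerFrame(VM*, CallFrame*) { std::puts("unwind from caller frame"); }

// Each path marshals the same two arguments but targets its own helper,
// so the old shared tail (reached via the 'doLookup' jump) no longer fits.
void compileExceptionHandlersModel(bool rollbackChecks, bool normalChecks, VM* vm, CallFrame* frame)
{
    if (rollbackChecks)
        lookupExceptionHandlerFromCallerFrame(vm, frame);
    if (normalChecks)
        lookupExceptionHandler(vm, frame);
}

int main()
{
    VM vm;
    CallFrame frame;
    compileExceptionHandlersModel(true, true, &vm, &frame);
    return 0;
}
```
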
void JITCompiler::link(LinkBuffer& linkBuffer)
if (!m_graph.m_plan.inlineCallFrames->isEmpty())
m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
- m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
- m_jitCode->common.slowArguments = WTF::move(m_graph.m_slowArguments);
-
#if USE(JSVALUE32_64)
m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
#endif
+
+ m_graph.registerFrozenValues();
BitVector usedJumpTables;
for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
table.ctiOffsets[j] = table.ctiDefault;
for (unsigned j = data.cases.size(); j--;) {
SwitchCase& myCase = data.cases[j];
- table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
+ table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
}
}
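
The switch-lowering loop above first fills every slot with ctiDefault, then overwrites the slots that have explicit cases, keyed by value - table.min. The same dense-table idea in miniature, with plain function pointers standing in for linked code locations:

```cpp
#include <cstdio>
#include <vector>

using Handler = void (*)();

void handleDefault() { std::puts("default"); }
void handleFive() { std::puts("case 5"); }
void handleSeven() { std::puts("case 7"); }

int main()
{
    int min = 5, max = 7;
    // Every slot starts as the default target (table.ctiOffsets[j] = table.ctiDefault)...
    std::vector<Handler> table(max - min + 1, handleDefault);
    // ...then explicit cases overwrite their slots, keyed by value - min.
    table[5 - min] = handleFive;
    table[7 - min] = handleSeven;

    for (int value = min; value <= max; ++value)
        table[value - min](); // case 5, default, case 7
    return 0;
}
```
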
JSCallRecord& record = m_jsCalls[i];
CallLinkInfo& info = *record.m_info;
ThunkGenerator generator = linkThunkGeneratorFor(
- info.callType == CallLinkInfo::Construct ? CodeForConstruct : CodeForCall,
+ info.specializationKind(),
RegisterPreservationNotRequired);
linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(generator).code().executableAddress()));
- info.callReturnLocation = linkBuffer.locationOfNearCall(record.m_slowCall);
- info.hotPathBegin = linkBuffer.locationOf(record.m_targetToCheck);
- info.hotPathOther = linkBuffer.locationOfNearCall(record.m_fastCall);
+ info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
+ linkBuffer.locationOf(record.m_targetToCheck),
+ linkBuffer.locationOfNearCall(record.m_fastCall));
}
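
setCallLocations() replaces three separate field writes on CallLinkInfo. A sketch of that encapsulation, with plain ints standing in for the code-location types the real class stores:

```cpp
struct CallLinkInfoLike {
    // One setter instead of three raw field writes, mirroring the new
    // setCallLocations(callReturnLocation, hotPathBegin, hotPathOther).
    void setCallLocations(int callReturnLocation, int hotPathBegin, int hotPathOther)
    {
        m_callReturnLocation = callReturnLocation;
        m_hotPathBegin = hotPathBegin;
        m_hotPathOther = hotPathOther;
    }
    int m_callReturnLocation = 0;
    int m_hotPathBegin = 0;
    int m_hotPathOther = 0;
};

int main()
{
    CallLinkInfoLike info;
    info.setCallLocations(1, 2, 3);
    return info.m_hotPathOther == 3 ? 0 : 1;
}
```
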
MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
setStartOfCode();
compileEntry();
- m_speculative = adoptPtr(new SpeculativeJIT(*this));
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
+
+ // Plant a check that sufficient space is available in the JSStack.
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
+
addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
checkStackPointerAlignment();
compileBody();
setEndOfMainPath();
+ // === Footer code generation ===
+ //
+ // Generate the stack overflow handling; if the stack check in the entry head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
+
+ emitStoreCodeOrigin(CodeOrigin(0));
+
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+
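
The planted check computes the lowest address the new frame will touch and branches to the overflow path when the VM's stack limit is above it. A sketch of that comparison with plain integers standing in for registers (assuming a downward-growing stack and 8-byte, pointer-sized Registers):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns true when a frame of requiredRegisterCount registers fits above the limit.
bool frameFits(std::uintptr_t callFrame, std::size_t requiredRegisterCount, std::uintptr_t stackLimit)
{
    // Lowest address the frame will touch: locals grow downward, and each
    // Register is pointer-sized (8 bytes here).
    std::uintptr_t frameTop = callFrame - requiredRegisterCount * 8;
    // The patch branches the other way around: jump to the overflow path
    // when stackLimit is Above (unsigned >) the computed address.
    return frameTop >= stackLimit;
}

int main()
{
    std::uintptr_t limit = 0x1000;
    std::printf("%d\n", frameFits(0x2000, 16, limit)); // 1: fits
    std::printf("%d\n", frameFits(0x1040, 16, limit)); // 0: would overflow
    return 0;
}
```
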
// Generate slow path code.
m_speculative->runSlowPathGenerators();
// Create OSR entry trampolines if necessary.
m_speculative->createOSREntries();
setEndOfCode();
-}
-void JITCompiler::link()
-{
- OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, *this, m_codeBlock, JITCompilationCanFail));
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
if (linkBuffer->didFailToAllocate()) {
- m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
return;
}
disassemble(*linkBuffer);
- m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
- m_graph.m_plan, m_jitCode.release(), linkBuffer.release()));
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer));
}
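
With OwnPtr gone, the LinkBuffer is handed to the finalizer by moving the unique_ptr itself (WTF::move is WebKit's std::move) rather than by release()-ing a raw pointer. A minimal sketch with stand-in types:

```cpp
#include <memory>
#include <utility>

struct LinkBufferLike { bool failed = false; };

struct FinalizerLike {
    explicit FinalizerLike(std::unique_ptr<LinkBufferLike> buffer)
        : m_buffer(std::move(buffer)) { }
    std::unique_ptr<LinkBufferLike> m_buffer;
};

int main()
{
    auto linkBuffer = std::make_unique<LinkBufferLike>();
    // Ownership moves into the finalizer; linkBuffer is null afterwards.
    FinalizerLike finalizer(std::move(linkBuffer));
    return finalizer.m_buffer && !linkBuffer ? 0 : 1;
}
```
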
void JITCompiler::compileFunction()
checkStackPointerAlignment();
// === Function body code generation ===
- m_speculative = adoptPtr(new SpeculativeJIT(*this));
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
compileBody();
setEndOfMainPath();
#else
thunkReg = GPRInfo::regT5;
#endif
- move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), thunkReg);
+ CodeLocationLabel* arityThunkLabels =
+ m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
+ move(TrustedImmPtr(arityThunkLabels), thunkReg);
loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
m_callArityFixup = call();
jump(fromArityCheck);
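
The arity-check path now names the thunk-label table before indexing it; the BaseIndex load then computes base + index * sizeof(void*). The addressing in miniature, with placeholder pointers rather than real return PCs:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    // Placeholder "return PCs"; the real table comes from arityCheckFailReturnThunks.
    const void* arityThunkLabels[] = { (void*)0x1000, (void*)0x2000, (void*)0x3000 };
    const void** thunkReg = arityThunkLabels; // move(TrustedImmPtr(arityThunkLabels), thunkReg)
    std::uintptr_t regT0 = 2;                 // index of the needed fixup
    const void* thunk = thunkReg[regT0];      // loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg)
    std::printf("%p\n", thunk);               // 0x3000
    return 0;
}
```
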
// Create OSR entry trampolines if necessary.
m_speculative->createOSREntries();
setEndOfCode();
-}
-void JITCompiler::linkFunction()
-{
// === Link ===
- OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, *this, m_codeBlock, JITCompilationCanFail));
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
if (linkBuffer->didFailToAllocate()) {
- m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
return;
}
link(*linkBuffer);
m_jitCode->shrinkToFit();
codeBlock()->shrinkToFit(CodeBlock::LateShrink);
- linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));
+ linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
disassemble(*linkBuffer);
MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
- m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
- m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), withArityCheck));
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer), withArityCheck);
}
void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
- if (shouldShowDisassembly())
+ if (shouldShowDisassembly()) {
m_disassembler->dump(linkBuffer);
+ linkBuffer.didAlreadyDisassemble();
+ }
if (m_graph.m_plan.compilation)
m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
- ASSERT(m_graph.isNumberConstant(node));
- JSValue jsvalue = node->valueOfJSConstant(codeBlock());
- ASSERT(jsvalue.isDouble());
-
- double value = jsvalue.asDouble();
+ double value = node->asNumber();
int64_t valueBits = bitwise_cast<int64_t>(value);
auto it = m_graph.m_doubleConstantsMap.find(valueBits);
if (it != m_graph.m_doubleConstantsMap.end())
}
#endif
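
The double-constants map is keyed on the value's bit pattern (bitwise_cast<int64_t>), not the double itself; bit-keying keeps -0.0 distinct from +0.0 and gives NaN a usable key, which floating-point equality would not. A minimal version using C++20's std::bit_cast in place of WTF's bitwise_cast:

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>
#include <unordered_map>

int main()
{
    std::unordered_map<std::int64_t, int> doubleConstants;
    doubleConstants[std::bit_cast<std::int64_t>(0.0)] = 1;
    doubleConstants[std::bit_cast<std::int64_t>(-0.0)] = 2; // distinct key from +0.0
    double nan = std::bit_cast<double>(INT64_C(0x7ff8000000000000));
    doubleConstants[std::bit_cast<std::int64_t>(nan)] = 3;  // NaN gets a stable slot too
    std::printf("%zu\n", doubleConstants.size());           // 3
    return 0;
}
```
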
+void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
+{
+ // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
+ if (!basicBlock.intersectionOfCFAHasVisited)
+ return;
+
+ OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
+
+ entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;
+
+ // Fix the expected values: in our protocol, a dead variable will have an expected
+ // value of (None, []). But the old JIT may stash some values there. So we really
+ // need (Top, TOP).
+ for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
+ Node* node = basicBlock.variablesAtHead.argument(argument);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.argument(argument).makeHeapTop();
+ }
+ for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
+ Node* node = basicBlock.variablesAtHead.local(local);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.local(local).makeHeapTop();
+ else {
+ VariableAccessData* variable = node->variableAccessData();
+ entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
+
+ switch (variable->flushFormat()) {
+ case FlushedDouble:
+ entry->m_localsForcedDouble.set(local);
+ break;
+ case FlushedInt52:
+ entry->m_localsForcedMachineInt.set(local);
+ break;
+ default:
+ break;
+ }
+
+ if (variable->local() != variable->machineLocal()) {
+ entry->m_reshufflings.append(
+ OSREntryReshuffling(
+ variable->local().offset(), variable->machineLocal().offset()));
+ }
+ }
+ }
+
+ entry->m_reshufflings.shrinkToFit();
+}
+
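
noticeOSREntry() widens the expected value of every variable that is dead at the block head to "heap top", since the baseline JIT may have left arbitrary values in those slots. The fixup in miniature, with stand-in types for OSREntryData's expected values:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

struct ExpectedValue {
    bool isTop = false;
    void makeHeapTop() { isTop = true; } // accept any value in this slot
};

int main()
{
    // Expected values copied from the CFA's intersectionOfPastValuesAtHead...
    std::vector<ExpectedValue> expectedLocals(4);
    // ...and a liveness mask standing in for variablesAtHead / shouldGenerate().
    std::vector<bool> liveAtHead = { true, false, true, false };

    for (std::size_t local = 0; local < expectedLocals.size(); ++local) {
        if (!liveAtHead[local])
            expectedLocals[local].makeHeapTop(); // dead slot: demand nothing of it
    }

    for (std::size_t local = 0; local < expectedLocals.size(); ++local)
        std::printf("local %zu: %s\n", local, expectedLocals[local].isTop ? "top" : "checked");
    return 0;
}
```
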
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)