/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#if ENABLE(DFG_JIT)
+#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
+#include "DFGFailedFinalizer.h"
+#include "DFGInlineCacheWrapperInlines.h"
+#include "DFGJITCode.h"
+#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
+#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
-#include "JSGlobalData.h"
+#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSCInlines.h"
+#include "VM.h"
namespace JSC { namespace DFG {
+JITCompiler::JITCompiler(Graph& dfg)
+ : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
+ , m_graph(dfg)
+ , m_jitCode(adoptRef(new JITCode()))
+ , m_blockHeads(dfg.numBlocks())
+{
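+ // The disassembler serves both the human-readable disassembly dumps and the
+ // per-bytecode profiler's machine-code view, so create it if either is in play.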
+ if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
+ m_disassembler = std::make_unique<Disassembler>(dfg);
+}
+
+JITCompiler::~JITCompiler()
+{
+}
+
void JITCompiler::linkOSRExits()
{
- for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
- OSRExit& exit = codeBlock()->osrExit(i);
- exit.m_check.initialJump().link(this);
+ ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
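+ // When profiling, remember a label for each OSR exit site (either the exit's
+ // failure jumps or, for jump replacements, the replacement source) so the final
+ // exit addresses can be reported once the code is linked.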
+ if (m_graph.compilation()) {
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
+ Vector<Label> labels;
+ if (!info.m_failureJumps.empty()) {
+ for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
+ labels.append(info.m_failureJumps.jumps()[j].label());
+ } else
+ labels.append(info.m_replacementSource);
+ m_exitSiteLabels.append(labels);
+ }
+ }
+
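+ // Emit the landing pad for each OSR exit: link its failure jumps here (or record
+ // this point as the jump-replacement destination), store the exit index where the
+ // exit thunk can find it, and plant a patchable jump to be linked to the thunk.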
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExit& exit = m_jitCode->osrExit[i];
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
+ JumpList& failureJumps = info.m_failureJumps;
+ if (!failureJumps.empty())
+ failureJumps.link(this);
+ else
+ info.m_replacementDestination = label();
jitAssertHasValidCallFrame();
- store32(TrustedImm32(i), &globalData()->osrExitIndex);
- exit.m_check.switchToLateJump(patchableJump());
+ store32(TrustedImm32(i), &vm()->osrExitIndex);
+ exit.setPatchableCodeOffset(patchableJump());
}
}
void JITCompiler::compileEntry()
{
// This code currently matches the old JIT. In the function header we need to
- // pop the return address (since we do not allow any recursion on the machine
- // stack), and perform a fast register file check.
+ // save the return address and call frame via the prologue, and perform a fast stack check.
// FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
- // We'll need to convert the remaining cti_ style calls (specifically the register file
+ // We'll need to convert the remaining cti_ style calls (specifically the stack
// check) which will be dependent on stack layout. (We'd need to account for this in
// both normal return code and when jumping to an exception handler).
- preserveReturnAddressAfterCall(GPRInfo::regT2);
- emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
- emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+ emitFunctionPrologue();
+ emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ jitAssertTagsInPlace();
}
-void JITCompiler::compileBody(SpeculativeJIT& speculative)
+void JITCompiler::compileBody()
{
// We generate the speculative code path, followed by OSR exit code to return
// to the old JIT code if speculations fail.
-#if DFG_ENABLE(JIT_BREAK_ON_EVERY_FUNCTION)
- // Handy debug tool!
- breakpoint();
-#endif
-
- addPtr(TrustedImm32(1), AbsoluteAddress(codeBlock()->addressOfSpeculativeSuccessCounter()));
-
- bool compiledSpeculative = speculative.compile();
+ bool compiledSpeculative = m_speculative->compile();
ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
+}
- linkOSRExits();
+void JITCompiler::compileExceptionHandlers()
+{
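+ // Two kinds of exception checks are linked here: checks that must first roll back
+ // the call frame (routed to lookupExceptionHandlerFromCallerFrame) and ordinary
+ // checks (routed to lookupExceptionHandler).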
+ if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+ m_exceptionChecksWithCallFrameRollback.link(this);
- // Iterate over the m_calls vector, checking for jumps to link.
- bool didLinkExceptionCheck = false;
- for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
- Jump& exceptionCheck = m_exceptionChecks[i].m_exceptionCheck;
- if (exceptionCheck.isSet()) {
- exceptionCheck.link(this);
- didLinkExceptionCheck = true;
- }
+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
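+ // Recompute the stack pointer from the call frame so the call below runs on a sane stack.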
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
+
+ jumpToExceptionHandler();
}
- // If any exception checks were linked, generate code to lookup a handler.
- if (didLinkExceptionCheck) {
- // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
- // the index into the CodeBlock's callReturnIndexVector corresponding to the
- // call that threw the exception (this was set in nonPreservedNonReturnGPR, when
- // the exception check was planted).
- move(GPRInfo::nonPreservedNonReturnGPR, GPRInfo::argumentGPR1);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ if (!m_exceptionChecks.empty()) {
+ m_exceptionChecks.link(this);
+
+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+
#if CPU(X86)
// FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
- // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
- // and the address of the handler in returnValueGPR2.
- jump(GPRInfo::returnValueGPR2);
+
+ jumpToExceptionHandler();
}
}
void JITCompiler::link(LinkBuffer& linkBuffer)
{
// Link the code, populate data in CodeBlock data structures.
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
+ m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
+ m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
+
+ if (!m_graph.m_plan.inlineCallFrames->isEmpty())
+ m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
+
+#if USE(JSVALUE32_64)
+ m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
#endif
+
+ m_graph.registerFrozenValues();
+
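+ // For each switch that was compiled as a jump table, fill in the linked table:
+ // every entry defaults to the fall-through block, then known cases are overwritten
+ // with their target block heads.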
+ BitVector usedJumpTables;
+ for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
+ SwitchData& data = **iter;
+ if (!data.didUseJumpTable)
+ continue;
+
+ if (data.kind == SwitchString)
+ continue;
+
+ RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);
+
+ usedJumpTables.set(data.switchTableIndex);
+ SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ table.ctiOffsets.grow(table.branchOffsets.size());
+ for (unsigned j = table.ctiOffsets.size(); j--;)
+ table.ctiOffsets[j] = table.ctiDefault;
+ for (unsigned j = data.cases.size(); j--;) {
+ SwitchCase& myCase = data.cases[j];
+ table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
+ linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ }
+ }
+
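+ // Any jump table that no compiled switch ended up using can be cleared to reclaim memory.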
+ for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
+ if (usedJumpTables.get(i))
+ continue;
+
+ m_codeBlock->switchJumpTable(i).clear();
+ }
+
+ // NOTE: we cannot clear string switch tables because (1) we're running concurrently
+ // and we cannot deref StringImpl's and (2) it would be weird to deref those
+ // StringImpl's since we refer to them.
+ for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
+ SwitchData& data = **switchDataIter;
+ if (!data.didUseJumpTable)
+ continue;
+
+ if (data.kind != SwitchString)
+ continue;
+
+ StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ StringJumpTable::StringOffsetTable::iterator iter;
+ StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
+ for (iter = table.offsetTable.begin(); iter != end; ++iter)
+ iter->value.ctiOffset = table.ctiDefault;
+ for (unsigned j = data.cases.size(); j--;) {
+ SwitchCase& myCase = data.cases[j];
+ iter = table.offsetTable.find(myCase.value.stringImpl());
+ RELEASE_ASSERT(iter != end);
+ iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ }
+ }
// Link all calls out from the JIT code to their respective functions.
for (unsigned i = 0; i < m_calls.size(); ++i)
linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
- if (m_codeBlock->needsCallReturnIndices()) {
- m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
- for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
- unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
- CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
- while (codeOrigin.inlineCallFrame)
- codeOrigin = codeOrigin.inlineCallFrame->caller;
- unsigned exceptionInfo = codeOrigin.bytecodeIndex;
- m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
- }
- }
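+ // Finalize the get-by-id and put-by-id inline caches, giving each its final code locations.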
+ for (unsigned i = m_getByIds.size(); i--;)
+ m_getByIds[i].finalize(linkBuffer);
+ for (unsigned i = m_putByIds.size(); i--;)
+ m_putByIds[i].finalize(linkBuffer);
- Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
- codeOrigins.resize(m_exceptionChecks.size());
-
- for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
- CallExceptionRecord& record = m_exceptionChecks[i];
- unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
- codeOrigins[i].codeOrigin = record.m_codeOrigin;
- codeOrigins[i].callReturnOffset = returnAddressOffset;
- record.m_token.assertCodeOriginIndex(i);
- }
-
- m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
- for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
- StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
- CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
- info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
+ for (unsigned i = 0; i < m_ins.size(); ++i) {
+ StructureStubInfo& info = *m_ins[i].m_stubInfo;
+ CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
+ info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
+ info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
info.callReturnLocation = callReturnLocation;
- info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
- info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
-#if USE(JSVALUE64)
- info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToLoadOrStore));
-#else
- info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToTagLoadOrStore));
- info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToPayloadLoadOrStore));
-#endif
- info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToSlowCase));
- info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToDone));
- info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
-#if USE(JSVALUE64)
- info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
-#else
- info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
- info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
-#endif
- info.patch.dfg.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
- info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
+ info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
}
- m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
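+ // Link each JS call's slow path to the appropriate link thunk for its
+ // specialization kind, and record the code locations that the CallLinkInfo
+ // uses for later (re)linking.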
for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
- CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
- info.callType = m_jsCalls[i].m_callType;
- info.isDFG = true;
- info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
- info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
- info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
+ JSCallRecord& record = m_jsCalls[i];
+ CallLinkInfo& info = *record.m_info;
+ ThunkGenerator generator = linkThunkGeneratorFor(
+ info.specializationKind(),
+ RegisterPreservationNotRequired);
+ linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(generator).code().executableAddress()));
+ info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
+ linkBuffer.locationOf(record.m_targetToCheck),
+ linkBuffer.locationOfNearCall(record.m_fastCall));
}
- MacroAssemblerCodeRef osrExitThunk = globalData()->getCTIStub(osrExitGenerationThunkGenerator);
+ MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
- for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
- OSRExit& exit = codeBlock()->osrExit(i);
- linkBuffer.link(exit.m_check.lateJump(), target);
- exit.m_check.correctLateJump(linkBuffer);
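+ // Point each OSR exit's patchable jump at the shared exit-generation thunk; the
+ // exit index stored at the exit site tells the thunk which exit to compile.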
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExit& exit = m_jitCode->osrExit[i];
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
+ linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
+ exit.correctJump(linkBuffer);
+ if (info.m_replacementSource.isSet()) {
+ m_jitCode->common.jumpReplacements.append(JumpReplacement(
+ linkBuffer.locationOf(info.m_replacementSource),
+ linkBuffer.locationOf(info.m_replacementDestination)));
+ }
}
- codeBlock()->shrinkWeakReferencesToFit();
- codeBlock()->shrinkWeakReferenceTransitionsToFit();
+ if (m_graph.compilation()) {
+ ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
+ for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
+ Vector<Label>& labels = m_exitSiteLabels[i];
+ Vector<const void*> addresses;
+ for (unsigned j = 0; j < labels.size(); ++j)
+ addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
+ m_graph.compilation()->addOSRExitSite(addresses);
+ }
+ } else
+ ASSERT(!m_exitSiteLabels.size());
+
+ m_jitCode->common.compilation = m_graph.compilation();
}
-bool JITCompiler::compile(JITCode& entry)
+void JITCompiler::compile()
{
+ SamplingRegion samplingRegion("DFG Backend");
+
+ setStartOfCode();
compileEntry();
- SpeculativeJIT speculative(*this);
- compileBody(speculative);
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
+
+ // Plant a check that sufficient space is available in the JSStack.
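+ // The stack grows down, so regT1 ends up holding the lowest address this frame
+ // may touch; we overflow if the VM's stack limit is above it.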
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
+
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+ compileBody();
+ setEndOfMainPath();
+
+ // === Footer code generation ===
+ //
+ // Generate the stack overflow handling; if the stack check in the entry head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
+ emitStoreCodeOrigin(CodeOrigin(0));
+
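+ // Make room below the frame for the slow-path call's arguments, if this platform needs it.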
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+
+ // Generate slow path code.
+ m_speculative->runSlowPathGenerators();
+
+ compileExceptionHandlers();
+ linkOSRExits();
+
// Create OSR entry trampolines if necessary.
- speculative.createOSREntries();
+ m_speculative->createOSREntries();
+ setEndOfCode();
- LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock, JITCompilationCanFail);
- if (linkBuffer.didFailToAllocate())
- return false;
- link(linkBuffer);
- speculative.linkOSREntries(linkBuffer);
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+ if (linkBuffer->didFailToAllocate()) {
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ return;
+ }
+
+ link(*linkBuffer);
+ m_speculative->linkOSREntries(*linkBuffer);
+
+ m_jitCode->shrinkToFit();
+ codeBlock()->shrinkToFit(CodeBlock::LateShrink);
- entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
- return true;
+ disassemble(*linkBuffer);
+
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer));
}
-bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
+void JITCompiler::compileFunction()
{
+ SamplingRegion samplingRegion("DFG Backend");
+
+ setStartOfCode();
compileEntry();
// === Function header code generation ===
// If we needed to perform an arity check we will already have moved the return address,
// so enter after this.
Label fromArityCheck(this);
- // Plant a check that sufficient space is available in the RegisterFile.
- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
- addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
- Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
- // Return here after register file check.
- Label fromRegisterFileCheck = label();
+ // Plant a check that sufficient space is available in the JSStack.
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
+ // Move the stack pointer down to accommodate locals.
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
// === Function body code generation ===
- SpeculativeJIT speculative(*this);
- compileBody(speculative);
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
+ compileBody();
+ setEndOfMainPath();
// === Function footer code generation ===
//
- // Generate code to perform the slow register file check (if the fast one in
+ // Generate code to perform the stack overflow handling (if the stack check in
// the function header fails), and generate the entry point with arity check.
//
- // Generate the register file check; if the fast check in the function head fails,
- // we need to call out to a helper function to check whether more space is available.
- // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
- registerFileCheck.link(this);
- move(stackPointerRegister, GPRInfo::argumentGPR0);
- poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
- CallBeginToken token = beginCall();
- Call callRegisterFileCheck = call();
- notifyCall(callRegisterFileCheck, CodeOrigin(0), token);
- jump(fromRegisterFileCheck);
+ // Generate the stack overflow handling; if the stack check in the function head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
+
+ emitStoreCodeOrigin(CodeOrigin(0));
+
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
// The fast entry point into a function does not check the correct number of arguments
// have been passed to the call (we only use the fast entry point where we can statically
// determine the correct number of arguments have been passed, or have already checked).
// In cases where an arity check is necessary, we enter here.
// FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
- Label arityCheck = label();
+ m_arityCheck = label();
compileEntry();
- load32(AssemblyHelpers::payloadFor((VirtualRegister)RegisterFile::ArgumentCount), GPRInfo::regT1);
+ load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
- move(stackPointerRegister, GPRInfo::argumentGPR0);
- poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- token = beginCall();
- Call callArityCheck = call();
- notifyCall(callArityCheck, CodeOrigin(0), token);
- move(GPRInfo::regT0, GPRInfo::callFrameRegister);
+ emitStoreCodeOrigin(CodeOrigin(0));
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+ m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
+ branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
+ emitStoreCodeOrigin(CodeOrigin(0));
+ GPRReg thunkReg;
+#if USE(JSVALUE64)
+ thunkReg = GPRInfo::regT7;
+#else
+ thunkReg = GPRInfo::regT5;
+#endif
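+ // regT0 holds the arity-check operation's result (zero, tested above, means no
+ // fixup is needed); use it to index the table of arity-fail return thunks before
+ // calling the shared arity fixup thunk.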
+ CodeLocationLabel* arityThunkLabels =
+ m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
+ move(TrustedImmPtr(arityThunkLabels), thunkReg);
+ loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
+ m_callArityFixup = call();
jump(fromArityCheck);
+ // Generate slow path code.
+ m_speculative->runSlowPathGenerators();
+
+ compileExceptionHandlers();
+ linkOSRExits();
+
// Create OSR entry trampolines if necessary.
- speculative.createOSREntries();
-
+ m_speculative->createOSREntries();
+ setEndOfCode();
// === Link ===
- LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock, JITCompilationCanFail);
- if (linkBuffer.didFailToAllocate())
- return false;
- link(linkBuffer);
- speculative.linkOSREntries(linkBuffer);
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+ if (linkBuffer->didFailToAllocate()) {
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ return;
+ }
+ link(*linkBuffer);
+ m_speculative->linkOSREntries(*linkBuffer);
+
+ m_jitCode->shrinkToFit();
+ codeBlock()->shrinkToFit(CodeBlock::LateShrink);
+
+ linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
+
+ disassemble(*linkBuffer);
+
+ MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
+
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer), withArityCheck);
+}
+
+void JITCompiler::disassemble(LinkBuffer& linkBuffer)
+{
+ if (shouldShowDisassembly()) {
+ m_disassembler->dump(linkBuffer);
+ linkBuffer.didAlreadyDisassemble();
+ }
- // FIXME: switch the register file check & arity check over to DFGOpertaion style calls, not JIT stubs.
- linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
- linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);
+ if (m_graph.m_plan.compilation)
+ m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
+}
+
+#if USE(JSVALUE32_64)
+void* JITCompiler::addressOfDoubleConstant(Node* node)
+{
+ double value = node->asNumber();
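+ // Key the constant map by the double's bit pattern so that distinct encodings
+ // (e.g. 0.0 versus -0.0) get distinct constant pool slots.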
+ int64_t valueBits = bitwise_cast<int64_t>(value);
+ auto it = m_graph.m_doubleConstantsMap.find(valueBits);
+ if (it != m_graph.m_doubleConstantsMap.end())
+ return it->second;
+
+ if (!m_graph.m_doubleConstants)
+ m_graph.m_doubleConstants = std::make_unique<Bag<double>>();
+
+ double* addressInConstantPool = m_graph.m_doubleConstants->add();
+ *addressInConstantPool = value;
+ m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
+ return addressInConstantPool;
+}
+#endif
- entryWithArityCheck = linkBuffer.locationOf(arityCheck);
- entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
- return true;
+void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
+{
+ // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
+ if (!basicBlock.intersectionOfCFAHasVisited)
+ return;
+
+ OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
+
+ entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;
+
+ // Fix the expected values: in our protocol, a dead variable will have an expected
+ // value of (None, []). But the old JIT may stash some values there. So we really
+ // need (Top, TOP).
+ for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
+ Node* node = basicBlock.variablesAtHead.argument(argument);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.argument(argument).makeHeapTop();
+ }
+ for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
+ Node* node = basicBlock.variablesAtHead.local(local);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.local(local).makeHeapTop();
+ else {
+ VariableAccessData* variable = node->variableAccessData();
+ entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
+
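+ // Note the representation each live local is flushed in, so OSR entry can
+ // materialize values in the format the DFG code expects.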
+ switch (variable->flushFormat()) {
+ case FlushedDouble:
+ entry->m_localsForcedDouble.set(local);
+ break;
+ case FlushedInt52:
+ entry->m_localsForcedMachineInt.set(local);
+ break;
+ default:
+ break;
+ }
+
+ if (variable->local() != variable->machineLocal()) {
+ entry->m_reshufflings.append(
+ OSREntryReshuffling(
+ variable->local().offset(), variable->machineLocal().offset()));
+ }
+ }
+ }
+
+ entry->m_reshufflings.shrinkToFit();
}
} } // namespace JSC::DFG