/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#if ENABLE(DFG_JIT)
+#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
-#include "DFGJITCodeGenerator.h"
-#include "DFGNonSpeculativeJIT.h"
+#include "DFGFailedFinalizer.h"
+#include "DFGInlineCacheWrapperInlines.h"
+#include "DFGJITCode.h"
+#include "DFGJITFinalizer.h"
+#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
+#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
-#include "JSGlobalData.h"
+#include "DFGThunks.h"
+#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSCInlines.h"
+#include "VM.h"
namespace JSC { namespace DFG {
-// This method used to fill a numeric value to a FPR when linking speculative -> non-speculative.
-void JITCompiler::fillNumericToDouble(NodeIndex nodeIndex, FPRReg fpr, GPRReg temporary)
+JITCompiler::JITCompiler(Graph& dfg)
+ : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
+ , m_graph(dfg)
+ , m_jitCode(adoptRef(new JITCode()))
+ , m_blockHeads(dfg.numBlocks())
{
- Node& node = graph()[nodeIndex];
-
- if (node.isConstant()) {
- ASSERT(node.op == DoubleConstant);
- move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfDoubleConstant(nodeIndex)))), temporary);
- movePtrToDouble(temporary, fpr);
- } else {
- loadPtr(addressFor(node.virtualRegister()), temporary);
- Jump isInteger = branchPtr(MacroAssembler::AboveOrEqual, temporary, GPRInfo::tagTypeNumberRegister);
- jitAssertIsJSDouble(temporary);
- addPtr(GPRInfo::tagTypeNumberRegister, temporary);
- movePtrToDouble(temporary, fpr);
- Jump hasUnboxedDouble = jump();
- isInteger.link(this);
- convertInt32ToDouble(temporary, fpr);
- hasUnboxedDouble.link(this);
- }
+ if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
+ m_disassembler = std::make_unique<Disassembler>(dfg);
}
-// This method used to fill an integer value to a GPR when linking speculative -> non-speculative.
-void JITCompiler::fillInt32ToInteger(NodeIndex nodeIndex, GPRReg gpr)
+JITCompiler::~JITCompiler()
{
- Node& node = graph()[nodeIndex];
-
- if (node.isConstant()) {
- ASSERT(node.op == Int32Constant);
- move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
- } else {
-#if DFG_JIT_ASSERT
- // Redundant load, just so we can check the tag!
- loadPtr(addressFor(node.virtualRegister()), gpr);
- jitAssertIsJSInt32(gpr);
-#endif
- load32(addressFor(node.virtualRegister()), gpr);
- }
}
-// This method used to fill a JSValue to a GPR when linking speculative -> non-speculative.
-void JITCompiler::fillToJS(NodeIndex nodeIndex, GPRReg gpr)
+void JITCompiler::linkOSRExits()
{
- Node& node = graph()[nodeIndex];
-
- if (node.isConstant()) {
- if (isInt32Constant(nodeIndex)) {
- JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
- move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
- } else if (isDoubleConstant(nodeIndex)) {
- JSValue jsValue(JSValue::EncodeAsDouble, valueOfDoubleConstant(nodeIndex));
- move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
- } else {
- ASSERT(isJSConstant(nodeIndex));
- JSValue jsValue = valueOfJSConstant(nodeIndex);
- move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
+ if (m_graph.compilation()) {
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
+ Vector<Label> labels;
+ if (!info.m_failureJumps.empty()) {
+ for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
+ labels.append(info.m_failureJumps.jumps()[j].label());
+ } else
+ labels.append(info.m_replacementSource);
+ m_exitSiteLabels.append(labels);
}
- return;
}
+
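+ // For each exit: bind its failure branches (or, for exits with no branch of their
+ // own, record the label that a jump replacement will target), stash the exit index
+ // where the exit thunk can find it, and plant a patchable jump that link() later
+ // points at the OSR exit generation thunk.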
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExit& exit = m_jitCode->osrExit[i];
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
+ JumpList& failureJumps = info.m_failureJumps;
+ if (!failureJumps.empty())
+ failureJumps.link(this);
+ else
+ info.m_replacementDestination = label();
+ jitAssertHasValidCallFrame();
+ store32(TrustedImm32(i), &vm()->osrExitIndex);
+ exit.setPatchableCodeOffset(patchableJump());
+ }
+}
- loadPtr(addressFor(node.virtualRegister()), gpr);
+void JITCompiler::compileEntry()
+{
+ // This code currently matches the old JIT. In the function header we need to
+ // save return address and call frame via the prologue and perform a fast stack check.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
+ // We'll need to convert the remaining cti_ style calls (specifically the stack
+ // check) which will be dependent on stack layout. (We'd need to account for this in
+ // both normal return code and when jumping to an exception handler).
+ emitFunctionPrologue();
+ emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ jitAssertTagsInPlace();
}
-void JITCompiler::jumpFromSpeculativeToNonSpeculative(const SpeculationCheck& check, const EntryLocation& entry, SpeculationRecovery* recovery)
+void JITCompiler::compileBody()
{
- ASSERT(check.m_nodeIndex == entry.m_nodeIndex);
-
- // Link the jump from the Speculative path to here.
- check.m_check.link(this);
-
- // Does this speculation check require any additional recovery to be performed,
- // to restore any state that has been overwritten before we enter back in to the
- // non-speculative path.
- if (recovery) {
- // The only additional recovery we currently support is for integer add operation
- ASSERT(recovery->type() == SpeculativeAdd);
- // Revert the add.
- sub32(recovery->src(), recovery->dest());
- }
+ // We generate the speculative code path, followed by OSR exit code to return
+ // to the old JIT code if speculations fail.
- // FIXME: - This is hideously inefficient!
- // Where a value is live in a register in the speculative path, and is required in a register
- // on the non-speculative path, we should not need to be spilling it and reloading (we may
- // need to spill anyway, if the value is marked as spilled on the non-speculative path).
- // This may also be spilling values that don't need spilling, e.g. are already spilled,
- // are constants, or are arguments.
-
- // Spill all GPRs in use by the speculative path.
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndex = check.m_gprInfo[index].nodeIndex;
- if (nodeIndex == NoNode)
- continue;
+ bool compiledSpeculative = m_speculative->compile();
+ ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
+}
- DataFormat dataFormat = check.m_gprInfo[index].format;
- VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister();
+void JITCompiler::compileExceptionHandlers()
+{
+ if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+ m_exceptionChecksWithCallFrameRollback.link(this);
+
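+ // These checks come from callOperationWithCallFrameRollbackOnException (the stack
+ // overflow and arity check paths): the frame being built is abandoned and the
+ // handler search starts from the caller's frame.
+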
+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
- ASSERT(dataFormat == DataFormatInteger || DataFormatCell || dataFormat & DataFormatJS);
- if (dataFormat == DataFormatInteger)
- orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::toRegister(index));
- storePtr(GPRInfo::toRegister(index), addressFor(virtualRegister));
+ jumpToExceptionHandler();
}
- // Spill all FPRs in use by the speculative path.
- for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndex = check.m_fprInfo[index];
- if (nodeIndex == NoNode)
- continue;
+ if (!m_exceptionChecks.empty()) {
+ m_exceptionChecks.link(this);
- VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister();
+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
- moveDoubleToPtr(FPRInfo::toRegister(index), GPRInfo::regT0);
- subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- storePtr(GPRInfo::regT0, addressFor(virtualRegister));
+ jumpToExceptionHandler();
}
+}
- // Fill all FPRs in use by the non-speculative path.
- for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndex = entry.m_fprInfo[index];
- if (nodeIndex == NoNode)
- continue;
+void JITCompiler::link(LinkBuffer& linkBuffer)
+{
+ // Link the code, populate data in CodeBlock data structures.
+ m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
+ m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
+
+ if (!m_graph.m_plan.inlineCallFrames->isEmpty())
+ m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
+
+#if USE(JSVALUE32_64)
+ m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
+#endif
+
+ m_graph.registerFrozenValues();
- fillNumericToDouble(nodeIndex, FPRInfo::toRegister(index), GPRInfo::regT0);
+ BitVector usedJumpTables;
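+ // Point every used immediate/character switch jump table at the machine code for
+ // its target blocks; entries with no matching case fall back to ctiDefault.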
+ for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
+ SwitchData& data = **iter;
+ if (!data.didUseJumpTable)
+ continue;
+
+ if (data.kind == SwitchString)
+ continue;
+
+ RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);
+
+ usedJumpTables.set(data.switchTableIndex);
+ SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ table.ctiOffsets.grow(table.branchOffsets.size());
+ for (unsigned j = table.ctiOffsets.size(); j--;)
+ table.ctiOffsets[j] = table.ctiDefault;
+ for (unsigned j = data.cases.size(); j--;) {
+ SwitchCase& myCase = data.cases[j];
+ table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
+ linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
+ }
}
-
- // Fill all GPRs in use by the non-speculative path.
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndex = entry.m_gprInfo[index].nodeIndex;
- if (nodeIndex == NoNode)
+
+ for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
+ if (usedJumpTables.get(i))
continue;
+
+ m_codeBlock->switchJumpTable(i).clear();
+ }
- DataFormat dataFormat = entry.m_gprInfo[index].format;
- if (dataFormat == DataFormatInteger)
- fillInt32ToInteger(nodeIndex, GPRInfo::toRegister(index));
- else {
- ASSERT(dataFormat & DataFormatJS || dataFormat == DataFormatCell); // Treat cell as JSValue for now!
- fillToJS(nodeIndex, GPRInfo::toRegister(index));
- // FIXME: For subtypes of DataFormatJS, should jitAssert the subtype?
+ // NOTE: we cannot clear string switch tables because (1) we're running concurrently
+ // and we cannot deref StringImpl's and (2) it would be weird to deref those
+ // StringImpl's since we refer to them.
+ for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
+ SwitchData& data = **switchDataIter;
+ if (!data.didUseJumpTable)
+ continue;
+
+ if (data.kind != SwitchString)
+ continue;
+
+ StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
+ StringJumpTable::StringOffsetTable::iterator iter;
+ StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
+ for (iter = table.offsetTable.begin(); iter != end; ++iter)
+ iter->value.ctiOffset = table.ctiDefault;
+ for (unsigned j = data.cases.size(); j--;) {
+ SwitchCase& myCase = data.cases[j];
+ iter = table.offsetTable.find(myCase.value.stringImpl());
+ RELEASE_ASSERT(iter != end);
+ iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
}
}
- // Jump into the non-speculative path.
- jump(entry.m_entry);
+ // Link all calls out from the JIT code to their respective functions.
+ for (unsigned i = 0; i < m_calls.size(); ++i)
+ linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
+
+ for (unsigned i = m_getByIds.size(); i--;)
+ m_getByIds[i].finalize(linkBuffer);
+ for (unsigned i = m_putByIds.size(); i--;)
+ m_putByIds[i].finalize(linkBuffer);
+
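+ // For each inline cache in m_ins, record the offsets the repatcher needs relative
+ // to the slow path call: the fast-path done label, the patchable jump, and the
+ // start of the slow path.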
+ for (unsigned i = 0; i < m_ins.size(); ++i) {
+ StructureStubInfo& info = *m_ins[i].m_stubInfo;
+ CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
+ info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
+ info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
+ info.callReturnLocation = callReturnLocation;
+ info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
+ }
+
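+ // Link each JS call's slow path to the appropriate call link thunk and record the
+ // call and check locations so the CallLinkInfo can be repatched at runtime.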
+ for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
+ JSCallRecord& record = m_jsCalls[i];
+ CallLinkInfo& info = *record.m_info;
+ ThunkGenerator generator = linkThunkGeneratorFor(
+ info.specializationKind(),
+ RegisterPreservationNotRequired);
+ linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(generator).code().executableAddress()));
+ info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
+ linkBuffer.locationOf(record.m_targetToCheck),
+ linkBuffer.locationOfNearCall(record.m_fastCall));
+ }
+
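+ // Every OSR exit funnels through the shared exit generation thunk; exits that have
+ // a replacement source (taken by patching rather than by a branch) also get a
+ // jump replacement registered.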
+ MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
+ CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
+ for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
+ OSRExit& exit = m_jitCode->osrExit[i];
+ OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
+ linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
+ exit.correctJump(linkBuffer);
+ if (info.m_replacementSource.isSet()) {
+ m_jitCode->common.jumpReplacements.append(JumpReplacement(
+ linkBuffer.locationOf(info.m_replacementSource),
+ linkBuffer.locationOf(info.m_replacementDestination)));
+ }
+ }
+
+ if (m_graph.compilation()) {
+ ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
+ for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
+ Vector<Label>& labels = m_exitSiteLabels[i];
+ Vector<const void*> addresses;
+ for (unsigned j = 0; j < labels.size(); ++j)
+ addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
+ m_graph.compilation()->addOSRExitSite(addresses);
+ }
+ } else
+ ASSERT(!m_exitSiteLabels.size());
+
+ m_jitCode->common.compilation = m_graph.compilation();
+
}
-void JITCompiler::linkSpeculationChecks(SpeculativeJIT& speculative, NonSpeculativeJIT& nonSpeculative)
+void JITCompiler::compile()
{
- // Iterators to walk over the set of bail outs & corresponding entry points.
- SpeculationCheckVector::Iterator checksIter = speculative.speculationChecks().begin();
- SpeculationCheckVector::Iterator checksEnd = speculative.speculationChecks().end();
- NonSpeculativeJIT::EntryLocationVector::Iterator entriesIter = nonSpeculative.entryLocations().begin();
- NonSpeculativeJIT::EntryLocationVector::Iterator entriesEnd = nonSpeculative.entryLocations().end();
-
- // Iterate over the speculation checks.
- while (checksIter != checksEnd) {
- // For every bail out from the speculative path, we must have provided an entry point
- // into the non-speculative one.
- ASSERT(checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
-
- // There may be multiple bail outs that map to the same entry point!
- do {
- ASSERT(checksIter != checksEnd);
- ASSERT(entriesIter != entriesEnd);
-
- // Plant code to link this speculation failure.
- const SpeculationCheck& check = *checksIter;
- const EntryLocation& entry = *entriesIter;
- jumpFromSpeculativeToNonSpeculative(check, entry, speculative.speculationRecovery(check.m_recoveryIndex));
- ++checksIter;
- } while (checksIter != checksEnd && checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
- ++entriesIter;
- }
+ SamplingRegion samplingRegion("DFG Backend");
+
+ setStartOfCode();
+ compileEntry();
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56289
- ASSERT(!(checksIter != checksEnd));
- ASSERT(!(entriesIter != entriesEnd));
+ // Plant a check that sufficient space is available in the JSStack.
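+ // The stack grows down, so we overflow if the VM's stack limit is above the
+ // prospective frame top computed into regT1.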
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
+
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+ compileBody();
+ setEndOfMainPath();
+
+ // === Footer code generation ===
+ //
+ // Generate the stack overflow handling; if the stack check in the entry head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
+
+ emitStoreCodeOrigin(CodeOrigin(0));
+
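+ // Make room for the slow path call's outgoing stack arguments on targets where
+ // maxFrameExtentForSlowPathCall is non-zero.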
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+
+ // Generate slow path code.
+ m_speculative->runSlowPathGenerators();
+
+ compileExceptionHandlers();
+ linkOSRExits();
+
+ // Create OSR entry trampolines if necessary.
+ m_speculative->createOSREntries();
+ setEndOfCode();
+
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+ if (linkBuffer->didFailToAllocate()) {
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ return;
+ }
+
+ link(*linkBuffer);
+ m_speculative->linkOSREntries(*linkBuffer);
+
+ m_jitCode->shrinkToFit();
+ codeBlock()->shrinkToFit(CodeBlock::LateShrink);
+
+ disassemble(*linkBuffer);
+
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer));
}
-void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
+void JITCompiler::compileFunction()
{
- // === Stage 1 - Function header code generation ===
- //
- // This code currently matches the old JIT. In the function header we need to
- // pop the return address (since we do not allow any recursion on the machine
- // stack), and perform a fast register file check.
+ SamplingRegion samplingRegion("DFG Backend");
+
+ setStartOfCode();
+ compileEntry();
+ // === Function header code generation ===
// This is the main entry point, without performing an arity check.
- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
- // We'll need to convert the remaining cti_ style calls (specifically the register file
- // check) which will be dependent on stack layout. (We'd need to account for this in
- // both normal return code and when jumping to an exception handler).
- preserveReturnAddressAfterCall(GPRInfo::regT2);
- emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
// If an arity check was needed, the arity check entry point below emits its own
// prologue and then branches back to this label.
Label fromArityCheck(this);
+ // Plant a check that sufficient space is available in the JSStack.
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
- // Setup a pointer to the codeblock in the CallFrameHeader.
- emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
-
- // Plant a check that sufficient space is available in the RegisterFile.
- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
- addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
- Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
- // Return here after register file check.
- Label fromRegisterFileCheck = label();
+ // Move the stack pointer down to accommodate locals
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+ // === Function body code generation ===
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
+ compileBody();
+ setEndOfMainPath();
- // === Stage 2 - Function body code generation ===
+ // === Function footer code generation ===
//
- // We generate the speculative code path, followed by the non-speculative
- // code for the function. Next we need to link the two together, making
- // bail-outs from the speculative path jump to the corresponding point on
- // the non-speculative one (and generating any code necessary to juggle
- // register values around, rebox values, and ensure spilled, to match the
- // non-speculative path's requirements).
-
-#if DFG_JIT_BREAK_ON_EVERY_FUNCTION
- // Handy debug tool!
- breakpoint();
-#endif
-
- // First generate the speculative path.
- Label speculativePathBegin = label();
- SpeculativeJIT speculative(*this);
-#if !DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE
- bool compiledSpeculative = speculative.compile();
-#else
- bool compiledSpeculative = false;
-#endif
-
- // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
- // to allow it to check which nodes in the graph may bail out, and may need to reenter the
- // non-speculative path.
- if (compiledSpeculative) {
- SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
- NonSpeculativeJIT nonSpeculative(*this);
- nonSpeculative.compile(checkIterator);
-
- // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
- linkSpeculationChecks(speculative, nonSpeculative);
- } else {
- // If compilation through the SpeculativeJIT failed, throw away the code we generated.
- m_calls.clear();
- rewindToLabel(speculativePathBegin);
-
- SpeculationCheckVector noChecks;
- SpeculationCheckIndexIterator checkIterator(noChecks);
- NonSpeculativeJIT nonSpeculative(*this);
- nonSpeculative.compile(checkIterator);
- }
-
- // === Stage 3 - Function footer code generation ===
+ // Generate code to perform the stack overflow handling (if the stack check in
+ // the function header fails), and generate the entry point with arity check.
//
- // Generate code to lookup and jump to exception handlers, to perform the slow
- // register file check (if the fast one in the function header fails), and
- // generate the entry point with arity check.
-
- // Iterate over the m_calls vector, checking for exception checks,
- // and linking them to here.
- unsigned exceptionCheckCount = 0;
- for (unsigned i = 0; i < m_calls.size(); ++i) {
- Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
- if (exceptionCheck.isSet()) {
- exceptionCheck.link(this);
- ++exceptionCheckCount;
- }
- }
- // If any exception checks were linked, generate code to lookup a handler.
- if (exceptionCheckCount) {
- // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
- // an identifier for the operation that threw the exception, which we can use
- // to look up handler information. The identifier we use is the return address
- // of the call out from JIT code that threw the exception; this is still
- // available on the stack, just below the stack pointer!
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- peek(GPRInfo::argumentGPR1, -1);
- m_calls.append(CallRecord(call(), lookupExceptionHandler));
- // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
- // and the address of the handler in returnValueGPR2.
- jump(GPRInfo::returnValueGPR2);
- }
+ // Generate the stack overflow handling; if the stack check in the function head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
- // Generate the register file check; if the fast check in the function head fails,
- // we need to call out to a helper function to check whether more space is available.
- // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
- registerFileCheck.link(this);
- move(stackPointerRegister, GPRInfo::argumentGPR0);
- poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- Call callRegisterFileCheck = call();
- jump(fromRegisterFileCheck);
+ emitStoreCodeOrigin(CodeOrigin(0));
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+
// The fast entry point into a function does not check the correct number of arguments
// have been passed to the call (we only use the fast entry point where we can statically
// determine the correct number of arguments have been passed, or have already checked).
// In cases where an arity check is necessary, we enter here.
// FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
- Label arityCheck = label();
- preserveReturnAddressAfterCall(GPRInfo::regT2);
- emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
- branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
- move(stackPointerRegister, GPRInfo::argumentGPR0);
- poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- Call callArityCheck = call();
- move(GPRInfo::regT0, GPRInfo::callFrameRegister);
- jump(fromArityCheck);
-
-
- // === Stage 4 - Link ===
- //
- // Link the code, populate data in CodeBlock data structures.
-
- LinkBuffer linkBuffer(*m_globalData, this, m_globalData->executableAllocator);
-
-#if DFG_DEBUG_VERBOSE
- fprintf(stderr, "JIT code start at %p\n", linkBuffer.debugAddress());
-#endif
-
- // Link all calls out from the JIT code to their respective functions.
- for (unsigned i = 0; i < m_calls.size(); ++i)
- linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
-
- if (m_codeBlock->needsCallReturnIndices()) {
- m_codeBlock->callReturnIndexVector().reserveCapacity(exceptionCheckCount);
- for (unsigned i = 0; i < m_calls.size(); ++i) {
- if (m_calls[i].m_exceptionCheck.isSet()) {
- unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
- unsigned exceptionInfo = m_calls[i].m_exceptionInfo;
- m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
- }
- }
- }
-
- // FIXME: switch the register file check & arity check over to DFGOpertaion style calls, not JIT stubs.
- linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
- linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);
-
- entryWithArityCheck = linkBuffer.locationOf(arityCheck);
- entry = linkBuffer.finalizeCode();
-}
-
-#if DFG_JIT_ASSERT
-void JITCompiler::jitAssertIsInt32(GPRReg gpr)
-{
-#if CPU(X86_64)
- Jump checkInt32 = branchPtr(BelowOrEqual, gpr, TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu))));
- breakpoint();
- checkInt32.link(this);
+ m_arityCheck = label();
+ compileEntry();
+
+ load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
+ branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
+ emitStoreCodeOrigin(CodeOrigin(0));
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+ m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
+ branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
+ emitStoreCodeOrigin(CodeOrigin(0));
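+ // regT0, the arity check operation's result, indexes the table of return PCs for
+ // the arity-check-fail thunks; load the matching entry before calling into the
+ // shared arity fixup thunk.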
+ GPRReg thunkReg;
+#if USE(JSVALUE64)
+ thunkReg = GPRInfo::regT7;
#else
- UNUSED_PARAM(gpr);
+ thunkReg = GPRInfo::regT5;
#endif
+ CodeLocationLabel* arityThunkLabels =
+ m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
+ move(TrustedImmPtr(arityThunkLabels), thunkReg);
+ loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
+ m_callArityFixup = call();
+ jump(fromArityCheck);
+
+ // Generate slow path code.
+ m_speculative->runSlowPathGenerators();
+
+ compileExceptionHandlers();
+ linkOSRExits();
+
+ // Create OSR entry trampolines if necessary.
+ m_speculative->createOSREntries();
+ setEndOfCode();
+
+ // === Link ===
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+ if (linkBuffer->didFailToAllocate()) {
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+ return;
+ }
+ link(*linkBuffer);
+ m_speculative->linkOSREntries(*linkBuffer);
+
+ m_jitCode->shrinkToFit();
+ codeBlock()->shrinkToFit(CodeBlock::LateShrink);
+
+ linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
+
+ disassemble(*linkBuffer);
+
+ MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
+
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer), withArityCheck);
}
-void JITCompiler::jitAssertIsJSInt32(GPRReg gpr)
-{
- Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
- breakpoint();
- checkJSInt32.link(this);
-}
-
-void JITCompiler::jitAssertIsJSNumber(GPRReg gpr)
-{
- Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
- breakpoint();
- checkJSNumber.link(this);
-}
-
-void JITCompiler::jitAssertIsJSDouble(GPRReg gpr)
-{
- Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
- Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
- checkJSInt32.link(this);
- breakpoint();
- checkJSNumber.link(this);
-}
-#endif
-
-#if ENABLE(SAMPLING_COUNTERS) && CPU(X86_64) // Or any other 64-bit platform!
-void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
+void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
- addPtr(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
+ if (shouldShowDisassembly()) {
+ m_disassembler->dump(linkBuffer);
+ linkBuffer.didAlreadyDisassemble();
+ }
+
+ if (m_graph.m_plan.compilation)
+ m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}
-#endif
-#if ENABLE(SAMPLING_COUNTERS) && CPU(X86) // Or any other little-endian 32-bit platform!
-void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
+#if USE(JSVALUE32_64)
+void* JITCompiler::addressOfDoubleConstant(Node* node)
{
- intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
- add32(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
- addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
+ double value = node->asNumber();
+ int64_t valueBits = bitwise_cast<int64_t>(value);
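+ // Key the constant pool by bit pattern so distinct encodings (-0.0 vs 0.0, NaNs)
+ // each get their own slot and lookup never depends on double equality.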
+ auto it = m_graph.m_doubleConstantsMap.find(valueBits);
+ if (it != m_graph.m_doubleConstantsMap.end())
+ return it->second;
+
+ if (!m_graph.m_doubleConstants)
+ m_graph.m_doubleConstants = std::make_unique<Bag<double>>();
+
+ double* addressInConstantPool = m_graph.m_doubleConstants->add();
+ *addressInConstantPool = value;
+ m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
+ return addressInConstantPool;
}
#endif
-#if ENABLE(SAMPLING_FLAGS)
-void JITCompiler::setSamplingFlag(int32_t flag)
+void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
-}
-
-void JITCompiler::clearSamplingFlag(int32_t flag)
-{
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+ // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
+ if (!basicBlock.intersectionOfCFAHasVisited)
+ return;
+
+ OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
+
+ entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;
+
+ // Fix the expected values: in our protocol, a dead variable will have an expected
+ // value of (None, []). But the old JIT may stash some values there. So we really
+ // need (Top, TOP).
+ for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
+ Node* node = basicBlock.variablesAtHead.argument(argument);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.argument(argument).makeHeapTop();
+ }
+ for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
+ Node* node = basicBlock.variablesAtHead.local(local);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.local(local).makeHeapTop();
+ else {
+ VariableAccessData* variable = node->variableAccessData();
+ entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
+
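+ // Note which locals the DFG expects in unboxed double or Int52 form so OSR entry
+ // can convert them from the baseline's JSValue representation.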
+ switch (variable->flushFormat()) {
+ case FlushedDouble:
+ entry->m_localsForcedDouble.set(local);
+ break;
+ case FlushedInt52:
+ entry->m_localsForcedMachineInt.set(local);
+ break;
+ default:
+ break;
+ }
+
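+ // If the DFG assigned this local a different machine slot, record a reshuffling
+ // so OSR entry relocates the value.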
+ if (variable->local() != variable->machineLocal()) {
+ entry->m_reshufflings.append(
+ OSREntryReshuffling(
+ variable->local().offset(), variable->machineLocal().offset()));
+ }
+ }
+ }
+
+ entry->m_reshufflings.shrinkToFit();
}
-#endif
} } // namespace JSC::DFG
-#endif
+#endif // ENABLE(DFG_JIT)