/*
- * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
+#include "BytecodeLivenessAnalysis.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
+#include "CodeBlockSet.h"
+#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
-#include "DFGCodeBlocks.h"
#include "DFGCommon.h"
+#include "DFGCommonData.h"
#include "DFGExitProfile.h"
-#include "DFGMinifiedGraph.h"
-#include "DFGOSREntry.h"
-#include "DFGOSRExit.h"
-#include "DFGVariableEventStream.h"
+#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
+#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
-#include "JumpReplacementWatchpoint.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
-#include "LineInfo.h"
#include "ProfilerCompilation.h"
+#include "ProfilerJettisonReason.h"
#include "RegExpObject.h"
-#include "ResolveOperation.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
+#include "VirtualRegister.h"
#include "Watchpoint.h"
-#include <wtf/RefCountedArray.h>
-#include <wtf/FastAllocBase.h>
+#include <wtf/Bag.h>
+#include <wtf/FastMalloc.h>
#include <wtf/PassOwnPtr.h>
-#include <wtf/Platform.h>
+#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
namespace JSC {
-class DFGCodeBlocks;
class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;
-inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
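+// The unmodified arguments object lives in the register slot adjacent to the
+// arguments register. The old code computed it as index - 1; with locals now
+// addressed via VirtualRegister offsets, the adjacent slot becomes offset() + 1.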
+inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }
static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
-class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
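+// Passed to jettison(): CountReoptimization charges the jettison against the
+// reoptimization retry counter (backing off future optimization attempts),
+// while DontCountReoptimization leaves the counter alone.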
+enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
+
+class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
WTF_MAKE_FAST_ALLOCATED;
+ friend class BytecodeLivenessAnalysis;
friend class JIT;
friend class LLIntOffsetsExtractor;
public:
protected:
CodeBlock(CopyParsedBlockTag, CodeBlock& other);
- CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSGlobalObject*, unsigned baseScopeDepth, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative);
+ CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
WriteBarrier<JSGlobalObject> m_globalObject;
Heap* m_heap;
public:
JS_EXPORT_PRIVATE virtual ~CodeBlock();
-
+
UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
-
- String inferredName() const;
+
+ CString inferredName() const;
CodeBlockHash hash() const;
- String sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
- String sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
+ bool hasHash() const;
+ bool isSafeToComputeHash() const;
+ CString hashAsStringIfPossible() const;
+ CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
+ CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
void dump(PrintStream&) const;
-
+
int numParameters() const { return m_numParameters; }
void setNumParameters(int newValue);
-
+
int* addressOfNumParameters() { return &m_numParameters; }
static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
CodeBlock* alternative() { return m_alternative.get(); }
- PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
- void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
-
- CodeSpecializationKind specializationKind() const
+ PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
+ void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
+
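+    // Walks this CodeBlock plus everything reachable through alternative() and
+    // special OSR entry blocks, iteratively rather than recursively. A
+    // hypothetical use:
+    //
+    //     codeBlock->forEachRelatedCodeBlock([] (CodeBlock* block) {
+    //         dataLog(*block, "\n");
+    //     });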
+ template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
{
- return specializationFromIsConstruct(m_isConstructor);
- }
-
-#if ENABLE(JIT)
- CodeBlock* baselineVersion()
- {
- CodeBlock* result = replacement();
- if (!result)
- return 0; // This can happen if we're in the process of creating the baseline version.
- while (result->alternative())
- result = result->alternative();
- ASSERT(result);
- ASSERT(JITCode::isBaselineCode(result->getJITType()));
- return result;
+ Functor f(std::forward<Functor>(functor));
+ Vector<CodeBlock*, 4> codeBlocks;
+ codeBlocks.append(this);
+
+ while (!codeBlocks.isEmpty()) {
+ CodeBlock* currentCodeBlock = codeBlocks.takeLast();
+ f(currentCodeBlock);
+
+ if (CodeBlock* alternative = currentCodeBlock->alternative())
+ codeBlocks.append(alternative);
+ if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
+ codeBlocks.append(osrEntryBlock);
+ }
}
-#else
- CodeBlock* baselineVersion()
+
+ CodeSpecializationKind specializationKind() const
{
- return this;
+ return specializationFromIsConstruct(m_isConstructor);
}
-#endif
+
+ CodeBlock* baselineAlternative();
+
+ // FIXME: Get rid of this.
+ // https://bugs.webkit.org/show_bug.cgi?id=123677
+ CodeBlock* baselineVersion();
void visitAggregate(SlotVisitor&);
- static void dumpStatistics();
-
void dumpBytecode(PrintStream& = WTF::dataFile());
- void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
+ void dumpBytecode(
+ PrintStream&, unsigned bytecodeOffset,
+ const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
void printStructures(PrintStream&, const Instruction*);
void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
bool isStrictMode() const { return m_isStrictMode; }
+ ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
inline bool isKnownNotImmediate(int index)
{
- if (index == m_thisRegister && !m_isStrictMode)
+ if (index == m_thisRegister.offset() && !m_isStrictMode)
return true;
if (isConstantRegisterIndex(index))
unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
- int& startOffset, int& endOffset, unsigned& line, unsigned& column);
+ int& startOffset, int& endOffset, unsigned& line, unsigned& column);
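+    // Build a snapshot map of this block's inline cache metadata. The overloads
+    // taking a ConcurrentJITLocker let a caller that already holds m_lock batch
+    // several queries; the lock-free overloads presumably acquire it themselves.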
+ void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
+ void getStubInfoMap(StubInfoMap& result);
+
+ void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
+ void getCallLinkInfoMap(CallLinkInfoMap& result);
+
#if ENABLE(JIT)
+ StructureStubInfo* addStubInfo();
+ Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
+ Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }
- StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
- {
- return *(binarySearch<StructureStubInfo, void*>(m_structureStubInfos, m_structureStubInfos.size(), returnAddress.value(), getStructureStubInfoReturnLocation));
- }
-
- StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
- {
- return *(binarySearch<StructureStubInfo, unsigned>(m_structureStubInfos, m_structureStubInfos.size(), bytecodeIndex, getStructureStubInfoBytecodeIndex));
- }
-
void resetStub(StructureStubInfo&);
-
+
ByValInfo& getByValInfo(unsigned bytecodeIndex)
{
return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
}
- CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
- {
- return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
- }
-
- CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
- {
- ASSERT(JITCode::isBaselineCode(getJITType()));
- return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
- }
+ CallLinkInfo* addCallLinkInfo();
+ Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
+ Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
+
+ // This is a slow function call used primarily for compiling OSR exits in the case
+ // that there had been inlining. Chances are if you want to use this, you're really
+ // looking for a CallLinkInfoMap to amortize the cost of calling this.
+ CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
#endif // ENABLE(JIT)
-#if ENABLE(LLINT)
- Instruction* adjustPCIfAtCallSite(Instruction*);
-#endif
- unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
+ void unlinkIncomingCalls();
#if ENABLE(JIT)
- unsigned bytecodeOffsetForCallAtIndex(unsigned index)
- {
- if (!m_rareData)
- return 1;
- Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
- if (!callIndices.size())
- return 1;
- // FIXME: Fix places in DFG that call out to C that don't set the CodeOrigin. https://bugs.webkit.org/show_bug.cgi?id=118315
- ASSERT(index < m_rareData->m_callReturnIndexVector.size());
- if (index >= m_rareData->m_callReturnIndexVector.size())
- return 1;
- return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
- }
-
void unlinkCalls();
- bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
-
- void linkIncomingCall(CallLinkInfo* incoming)
- {
- m_incomingCalls.push(incoming);
- }
+ void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
{
}
#endif // ENABLE(JIT)
-#if ENABLE(LLINT)
- void linkIncomingCall(LLIntCallLinkInfo* incoming)
- {
- m_incomingLLIntCalls.push(incoming);
- }
-#endif // ENABLE(LLINT)
-
- void unlinkIncomingCalls();
+ void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
-#if ENABLE(DFG_JIT) || ENABLE(LLINT)
void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
{
m_jitCodeMap = jitCodeMap;
{
return m_jitCodeMap.get();
}
-#endif
-
-#if ENABLE(DFG_JIT)
- void createDFGDataIfNecessary()
- {
- if (!!m_dfgData)
- return;
-
- m_dfgData = adoptPtr(new DFGData);
- }
-
- void saveCompilation(PassRefPtr<Profiler::Compilation> compilation)
- {
- createDFGDataIfNecessary();
- m_dfgData->compilation = compilation;
- }
-
- Profiler::Compilation* compilation()
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->compilation.get();
- }
-
- DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
- {
- createDFGDataIfNecessary();
- DFG::OSREntryData entry;
- entry.m_bytecodeIndex = bytecodeIndex;
- entry.m_machineCodeOffset = machineCodeOffset;
- m_dfgData->osrEntry.append(entry);
- return &m_dfgData->osrEntry.last();
- }
- unsigned numberOfDFGOSREntries() const
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->osrEntry.size();
- }
- DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
- DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
- {
- if (!m_dfgData)
- return 0;
- return tryBinarySearch<DFG::OSREntryData, unsigned>(
- m_dfgData->osrEntry, m_dfgData->osrEntry.size(), bytecodeIndex,
- DFG::getOSREntryDataBytecodeIndex);
- }
-
- unsigned appendOSRExit(const DFG::OSRExit& osrExit)
- {
- createDFGDataIfNecessary();
- unsigned result = m_dfgData->osrExit.size();
- m_dfgData->osrExit.append(osrExit);
- return result;
- }
-
- DFG::OSRExit& lastOSRExit()
- {
- return m_dfgData->osrExit.last();
- }
-
- unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
- {
- createDFGDataIfNecessary();
- unsigned result = m_dfgData->speculationRecovery.size();
- m_dfgData->speculationRecovery.append(recovery);
- return result;
- }
-
- unsigned appendWatchpoint(const JumpReplacementWatchpoint& watchpoint)
- {
- createDFGDataIfNecessary();
- unsigned result = m_dfgData->watchpoints.size();
- m_dfgData->watchpoints.append(watchpoint);
- return result;
- }
-
- unsigned numberOfOSRExits()
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->osrExit.size();
- }
-
- unsigned numberOfSpeculationRecoveries()
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->speculationRecovery.size();
- }
-
- unsigned numberOfWatchpoints()
- {
- if (!m_dfgData)
- return 0;
- return m_dfgData->watchpoints.size();
- }
-
- DFG::OSRExit& osrExit(unsigned index)
- {
- return m_dfgData->osrExit[index];
- }
-
- DFG::SpeculationRecovery& speculationRecovery(unsigned index)
- {
- return m_dfgData->speculationRecovery[index];
- }
-
- JumpReplacementWatchpoint& watchpoint(unsigned index)
- {
- return m_dfgData->watchpoints[index];
- }
-
- void appendWeakReference(JSCell* target)
- {
- createDFGDataIfNecessary();
- m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*vm(), ownerExecutable(), target));
- }
-
- void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
- {
- createDFGDataIfNecessary();
- m_dfgData->transitions.append(
- WeakReferenceTransition(*vm(), ownerExecutable(), codeOrigin, from, to));
- }
-
- DFG::MinifiedGraph& minifiedDFG()
- {
- createDFGDataIfNecessary();
- return m_dfgData->minifiedDFG;
- }
-
- DFG::VariableEventStream& variableEventStream()
- {
- createDFGDataIfNecessary();
- return m_dfgData->variableEventStream;
- }
-#endif
-
+
unsigned bytecodeOffset(Instruction* returnAddress)
{
RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
unsigned numberOfInstructions() const { return m_instructions.size(); }
RefCountedArray<Instruction>& instructions() { return m_instructions; }
const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
-
+
size_t predictedMachineCodeSize();
-
+
bool usesOpcode(OpcodeID);
- unsigned instructionCount() { return m_instructions.size(); }
+ unsigned instructionCount() const { return m_instructions.size(); }
int argumentIndexAfterCapture(size_t argument);
+
+ bool hasSlowArguments();
+ const SlowArgument* machineSlowArguments();
-#if ENABLE(JIT)
- void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
+ // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
+ void install();
+
+ // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
+ PassRefPtr<CodeBlock> newReplacement();
+
+ void setJITCode(PassRefPtr<JITCode> code)
{
+ ASSERT(m_heap->isDeferred());
+ m_heap->reportExtraMemoryCost(code->size());
+ ConcurrentJITLocker locker(m_lock);
+ WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
m_jitCode = code;
- m_jitCodeWithArityCheck = codeWithArityCheck;
-#if ENABLE(DFG_JIT)
- if (m_jitCode.jitType() == JITCode::DFGJIT) {
- createDFGDataIfNecessary();
- m_vm->heap.m_dfgCodeBlocks.m_set.add(this);
- }
-#endif
- }
- JITCode& getJITCode() { return m_jitCode; }
- MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
- JITCode::JITType getJITType() const { return m_jitCode.jitType(); }
- ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
- virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex) = 0;
- void jettison();
- enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
- JITCompilationResult jitCompile(ExecState* exec)
- {
- if (getJITType() != JITCode::InterpreterThunk) {
- ASSERT(getJITType() == JITCode::BaselineJIT);
- return AlreadyCompiled;
- }
-#if ENABLE(JIT)
- if (jitCompileImpl(exec))
- return CompiledSuccessfully;
- return CouldNotCompile;
-#else
- UNUSED_PARAM(exec);
- return CouldNotCompile;
-#endif
}
- virtual CodeBlock* replacement() = 0;
-
- virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0;
- DFG::CapabilityLevel canCompileWithDFG()
+ PassRefPtr<JITCode> jitCode() { return m_jitCode; }
+ JITCode::JITType jitType() const
{
- DFG::CapabilityLevel result = canCompileWithDFGInternal();
- m_canCompileWithDFGState = result;
+ JITCode* jitCode = m_jitCode.get();
+ WTF::loadLoadFence();
+ JITCode::JITType result = JITCode::jitTypeFor(jitCode);
+ WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
return result;
}
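+    // The loadLoadFence above presumably pairs with the storeStoreFence in
+    // setJITCode(), so that a thread observing the new m_jitCode also observes
+    // the fully initialized code it points to.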
- DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; }
- bool hasOptimizedReplacement()
+ bool hasBaselineJITProfiling() const
{
- ASSERT(JITCode::isBaselineCode(getJITType()));
- bool result = replacement()->getJITType() > getJITType();
-#if !ASSERT_DISABLED
- if (result)
- ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
- else {
- ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
- ASSERT(replacement() == this);
- }
-#endif
- return result;
+ return jitType() == JITCode::BaselineJIT;
}
-#else
- JITCode::JITType getJITType() const { return JITCode::BaselineJIT; }
+
+#if ENABLE(JIT)
+ virtual CodeBlock* replacement() = 0;
+
+ virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
+ DFG::CapabilityLevel capabilityLevel();
+ DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
+
+ bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
+ bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif
+ void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization);
+
ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
void setVM(VM* vm) { m_vm = vm; }
VM* vm() { return m_vm; }
- void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
- int thisRegister() const { return m_thisRegister; }
+ void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
+ VirtualRegister thisRegister() const { return m_thisRegister; }
- bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
bool usesEval() const { return m_unlinkedCode->usesEval(); }
-
- void setArgumentsRegister(int argumentsRegister)
+
+ void setArgumentsRegister(VirtualRegister argumentsRegister)
{
- ASSERT(argumentsRegister != -1);
+ ASSERT(argumentsRegister.isValid());
m_argumentsRegister = argumentsRegister;
ASSERT(usesArguments());
}
- int argumentsRegister() const
+ VirtualRegister argumentsRegister() const
{
ASSERT(usesArguments());
return m_argumentsRegister;
}
- int uncheckedArgumentsRegister()
+ VirtualRegister uncheckedArgumentsRegister()
{
if (!usesArguments())
- return InvalidVirtualRegister;
+ return VirtualRegister();
return argumentsRegister();
}
- void setActivationRegister(int activationRegister)
+ void setActivationRegister(VirtualRegister activationRegister)
{
m_activationRegister = activationRegister;
}
- int activationRegister() const
+
+ VirtualRegister activationRegister() const
{
- ASSERT(needsFullScopeChain());
+ ASSERT(m_activationRegister.isValid());
return m_activationRegister;
}
- int uncheckedActivationRegister()
+
+ VirtualRegister uncheckedActivationRegister()
{
- if (!needsFullScopeChain())
- return InvalidVirtualRegister;
- return activationRegister();
+ return m_activationRegister;
}
- bool usesArguments() const { return m_argumentsRegister != -1; }
-
+
+ bool usesArguments() const { return m_argumentsRegister.isValid(); }
+
bool needsActivation() const
{
- return needsFullScopeChain() && codeType() != GlobalCode;
+ ASSERT(m_activationRegister.isValid() == m_needsActivation);
+ return m_needsActivation;
}
-
- bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const
+
+ unsigned captureCount() const
{
- if (operandIsArgument(operand))
- return operandToArgument(operand) && usesArguments();
-
- if (inlineCallFrame)
- return inlineCallFrame->capturedVars.get(operand);
-
- // The activation object isn't in the captured region, but it's "captured"
- // in the sense that stores to its location can be observed indirectly.
- if (needsActivation() && operand == activationRegister())
- return true;
-
- // Ditto for the arguments object.
- if (usesArguments() && operand == argumentsRegister())
- return true;
-
- // Ditto for the arguments object.
- if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
- return true;
-
- // We're in global code so there are no locals to capture
if (!symbolTable())
- return false;
-
- return operand >= symbolTable()->captureStart()
- && operand < symbolTable()->captureEnd();
+ return 0;
+ return symbolTable()->captureCount();
+ }
+
+ int captureStart() const
+ {
+ if (!symbolTable())
+ return 0;
+ return symbolTable()->captureStart();
+ }
+
+ int captureEnd() const
+ {
+ if (!symbolTable())
+ return 0;
+ return symbolTable()->captureEnd();
}
+ bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
+
+ int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
+ int framePointerOffsetToGetActivationRegisters();
+
CodeType codeType() const { return m_unlinkedCode->codeType(); }
+ PutPropertySlot::Context putByIdContext() const
+ {
+ if (codeType() == EvalCode)
+ return PutPropertySlot::PutByIdEval;
+ return PutPropertySlot::PutById;
+ }
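+    // Presumably this exists so that the property-put machinery can treat
+    // stores originating in eval code differently from ordinary put_by_id.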
SourceProvider* source() const { return m_source.get(); }
unsigned sourceOffset() const { return m_sourceOffset; }
size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
- void createActivation(CallFrame*);
-
void clearEvalCache();
-
- String nameForRegister(int registerNumber);
+
+ String nameForRegister(VirtualRegister);
#if ENABLE(JIT)
- void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
- size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
- StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
-
- void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
+ void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
size_t numberOfByValInfos() const { return m_byValInfos.size(); }
ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
-
- void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
- size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
- CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif
-
-#if ENABLE(VALUE_PROFILER)
+
unsigned numberOfArgumentValueProfiles()
{
ASSERT(m_numParameters >= 0);
ASSERT(result->m_bytecodeOffset != -1);
ASSERT(instructions()[bytecodeOffset + opcodeLength(
m_vm->interpreter->getOpcodeID(
- instructions()[
- bytecodeOffset].u.opcode)) - 1].u.profile == result);
+ instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
return result;
}
- SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
+ SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
{
- return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
+ return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
}
-
+
unsigned totalNumberOfValueProfiles()
{
return numberOfArgumentValueProfiles() + numberOfValueProfiles();
return valueProfileForArgument(index);
return valueProfile(index - numberOfArgumentValueProfiles());
}
-
+
RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
{
m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
}
unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
- RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
- {
- return tryBinarySearch<RareCaseProfile, int>(
- m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
- }
-
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
+
bool likelyToTakeSlowCase(int bytecodeOffset)
{
- if (!numberOfRareCaseProfiles())
+ if (!hasBaselineJITProfiling())
return false;
unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
return value >= Options::likelyToTakeSlowCaseMinimumCount();
}
-
+
bool couldTakeSlowCase(int bytecodeOffset)
{
- if (!numberOfRareCaseProfiles())
+ if (!hasBaselineJITProfiling())
return false;
unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
return value >= Options::couldTakeSlowCaseMinimumCount();
}
-
+
RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
{
m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
getRareCaseProfileBytecodeOffset);
}
-
+
bool likelyToTakeSpecialFastCase(int bytecodeOffset)
{
- if (!numberOfRareCaseProfiles())
+ if (!hasBaselineJITProfiling())
return false;
unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
}
-
+
bool couldTakeSpecialFastCase(int bytecodeOffset)
{
- if (!numberOfRareCaseProfiles())
+ if (!hasBaselineJITProfiling())
return false;
unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
}
-
+
bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
{
- if (!numberOfRareCaseProfiles())
+ if (!hasBaselineJITProfiling())
return false;
unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
unsigned value = slowCaseCount - specialFastCaseCount;
return value >= Options::likelyToTakeSlowCaseMinimumCount();
}
-
+
bool likelyToTakeAnySlowCase(int bytecodeOffset)
{
- if (!numberOfRareCaseProfiles())
+ if (!hasBaselineJITProfiling())
return false;
unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
unsigned value = slowCaseCount + specialFastCaseCount;
return value >= Options::likelyToTakeSlowCaseMinimumCount();
}
-
+
unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
}
ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
-#endif
// Exception handling support
size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
- void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
- {
- size_t count = unlinkedHandlers.size();
- if (!count)
- return;
- createRareDataIfNecessary();
- m_rareData->m_exceptionHandlers.resize(count);
- for (size_t i = 0; i < count; ++i) {
- m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
- m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
- m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
- m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
- }
-
- }
HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
-#if ENABLE(JIT)
- Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callReturnIndexVector()
- {
- createRareDataIfNecessary();
- return m_rareData->m_callReturnIndexVector;
- }
-#endif
-
#if ENABLE(DFG_JIT)
- SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
+ Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
{
- createRareDataIfNecessary();
- return m_rareData->m_inlineCallFrames;
+ return m_jitCode->dfgCommon()->codeOrigins;
}
-
- Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins()
- {
- createRareDataIfNecessary();
- return m_rareData->m_codeOrigins;
- }
-
+
// Having code origins implies that there has been some inlining.
bool hasCodeOrigins()
{
- return m_rareData && !!m_rareData->m_codeOrigins.size();
+ return JITCode::isOptimizingJIT(jitType());
}
- bool codeOriginForReturn(ReturnAddressPtr, CodeOrigin&);
-
bool canGetCodeOrigin(unsigned index)
{
- if (!m_rareData)
+ if (!hasCodeOrigins())
return false;
- return m_rareData->m_codeOrigins.size() > index;
+ return index < codeOrigins().size();
}
-
+
CodeOrigin codeOrigin(unsigned index)
{
- RELEASE_ASSERT(m_rareData);
- return m_rareData->m_codeOrigins[index].codeOrigin;
+ return codeOrigins()[index];
}
-
+
bool addFrequentExitSite(const DFG::FrequentExitSite& site)
{
- ASSERT(JITCode::isBaselineCode(getJITType()));
- return m_exitProfile.add(site);
+ ASSERT(JITCode::isBaselineCode(jitType()));
+ ConcurrentJITLocker locker(m_lock);
+ return m_exitProfile.add(locker, site);
+ }
+
+ bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
+ {
+ return m_exitProfile.hasExitSite(locker, site);
+ }
+ bool hasExitSite(const DFG::FrequentExitSite& site) const
+ {
+ ConcurrentJITLocker locker(m_lock);
+ return hasExitSite(locker, site);
}
-
- bool hasExitSite(const DFG::FrequentExitSite& site) const { return m_exitProfile.hasExitSite(site); }
DFG::ExitProfile& exitProfile() { return m_exitProfile; }
-
+
CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
{
return m_lazyOperandValueProfiles;
}
-#endif
+#endif // ENABLE(DFG_JIT)
// Constant Pool
+#if ENABLE(DFG_JIT)
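+    // Identifier indices form two contiguous ranges: indices below the unlinked
+    // code block's count resolve there; higher indices resolve into the extra
+    // identifiers the DFG added during compilation (dfgIdentifiers).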
+ size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
+ size_t numberOfDFGIdentifiers() const
+ {
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return 0;
- size_t numberOfIdentifiers() const { return m_identifiers.size(); }
- void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
- Identifier& identifier(int index) { return m_identifiers[index]; }
+ return m_jitCode->dfgCommon()->dfgIdentifiers.size();
+ }
+ const Identifier& identifier(int index) const
+ {
+ size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
+ if (static_cast<unsigned>(index) < unlinkedIdentifiers)
+ return m_unlinkedCode->identifier(index);
+ ASSERT(JITCode::isOptimizingJIT(jitType()));
+ return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
+ }
+#else
+ size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
+ const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
+#endif
+
+ Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
unsigned addConstant(JSValue v)
{
return result;
}
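+    // Reserves an uninitialized constant-pool slot to be filled in later;
+    // presumably for the concurrent compiler, which may not yet have the value
+    // (or a safe way to write-barrier it) when the slot is allocated.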
+ unsigned addConstantLazily()
+ {
+ unsigned result = m_constantRegisters.size();
+ m_constantRegisters.append(WriteBarrier<Unknown>());
+ return result;
+ }
+ bool findConstant(JSValue, unsigned& result);
unsigned addOrFindConstant(JSValue);
WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
return constantBufferAsVector(index).data();
}
+ Heap* heap() const { return m_heap; }
JSGlobalObject* globalObject() { return m_globalObject.get(); }
-
+
JSGlobalObject* globalObjectFor(CodeOrigin);
- // Jump Tables
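+    // Lazily builds the liveness analysis using double-checked locking: the
+    // analysis is computed outside m_lock, and the first thread to publish
+    // under the lock wins; a losing thread's copy is quietly discarded.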
+ BytecodeLivenessAnalysis& livenessAnalysis()
+ {
+ {
+ ConcurrentJITLocker locker(m_lock);
+ if (!!m_livenessAnalysis)
+ return *m_livenessAnalysis;
+ }
+ std::unique_ptr<BytecodeLivenessAnalysis> analysis =
+ std::make_unique<BytecodeLivenessAnalysis>(this);
+ {
+ ConcurrentJITLocker locker(m_lock);
+ if (!m_livenessAnalysis)
+ m_livenessAnalysis = WTF::move(analysis);
+ return *m_livenessAnalysis;
+ }
+ }
+
+ void validate();
- size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
- SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
- SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
+ // Jump Tables
- size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
- SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
- SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
+ size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
+ SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
+ SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
+ void clearSwitchJumpTables()
+ {
+ if (!m_rareData)
+ return;
+ m_rareData->m_switchJumpTables.clear();
+ }
size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
- SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }
+ SymbolTable* symbolTable() const { return m_symbolTable.get(); }
EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
enum ShrinkMode {
// Shrink prior to generating machine code that may point directly into vectors.
EarlyShrink,
-
+
// Shrink after generating machine code, and after possibly creating new vectors
// and appending to others. At this time it is not safe to shrink certain vectors
// because we would have generated machine code that references them directly.
LateShrink
};
void shrinkToFit(ShrinkMode);
-
- void copyPostParseDataFrom(CodeBlock* alternative);
- void copyPostParseDataFromAlternative();
-
+
// Functions for controlling when JITting kicks in, in a mixed mode
// execution world.
-
+
bool checkIfJITThresholdReached()
{
return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
}
-
+
void dontJITAnytimeSoon()
{
m_llintExecuteCounter.deferIndefinitely();
}
-
+
void jitAfterWarmUp()
{
m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
}
-
+
void jitSoon()
{
m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
}
-
- const ExecutionCounter& llintExecuteCounter() const
+
+ const BaselineExecutionCounter& llintExecuteCounter() const
{
return m_llintExecuteCounter;
}
-
+
// Functions for controlling when tiered compilation kicks in. This
// controls both when the optimizing compiler is invoked and when OSR
// entry happens. Two triggers exist: the loop trigger and the return
// case of the loop trigger, if the optimized compilation succeeds
// (or has already succeeded in the past) then OSR is attempted to
// redirect program flow into the optimized code.
-
+
// These functions are called from within the optimization triggers,
// and are used as a single point at which we define the heuristics
// for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
// as this is called from the CodeBlock constructor.
-
+
// When we observe a lot of speculation failures, we trigger a
// reoptimization. But each time, we increase the optimization trigger
// to avoid thrashing.
- unsigned reoptimizationRetryCounter() const;
+ JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
void countReoptimization();
+#if ENABLE(JIT)
+ unsigned numberOfDFGCompiles();
int32_t codeTypeThresholdMultiplier() const;
-
- int32_t counterValueForOptimizeAfterWarmUp();
- int32_t counterValueForOptimizeAfterLongWarmUp();
- int32_t counterValueForOptimizeSoon();
-
+
+ int32_t adjustedCounterValue(int32_t desiredThreshold);
+
int32_t* addressOfJITExecuteCounter()
{
return &m_jitExecuteCounter.m_counter;
}
-
- static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
- static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
- static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
- const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
-
+ static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
+ static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
+ static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
+
+ const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+
unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
-
+
// Check if the optimization threshold has been reached, and if not,
// adjust the heuristics accordingly. Returns true if the threshold has
// been reached.
bool checkIfOptimizationThresholdReached();
-
+
// Call this to force the next optimization trigger to fire. This is
// rarely wise, since optimization triggers are typically more
// expensive than executing baseline code.
void optimizeNextInvocation();
-
+
// Call this to prevent optimization from happening again. Note that
// optimization will still happen after roughly 2^29 invocations,
// so this is really meant to delay that as much as possible. This
// is called if optimization failed, and we expect it to fail in
// the future as well.
void dontOptimizeAnytimeSoon();
-
+
// Call this to reinitialize the counter to its starting state,
// forcing a warm-up to happen before the next optimization trigger
// fires. This is called in the CodeBlock constructor. It also
// OSR exit code is code generated, so the value of the execute
// counter that this corresponds to is also available directly.
void optimizeAfterWarmUp();
-
+
// Call this to force an optimization trigger to fire only after
// a lot of warm-up.
void optimizeAfterLongWarmUp();
-
+
// Call this to cause an optimization trigger to fire soon, but
// not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
// to trigger optimization if one of those functions becomes hot
// in the baseline code.
void optimizeSoon();
-
+
+ void forceOptimizationSlowPathConcurrently();
+
+ void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
+
uint32_t osrExitCounter() const { return m_osrExitCounter; }
-
+
void countOSRExit() { m_osrExitCounter++; }
-
+
uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
-
+
static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
-#if ENABLE(JIT)
uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
uint32_t exitCountThresholdForReoptimization();
uint32_t exitCountThresholdForReoptimizationFromLoop();
bool shouldReoptimizeNow();
bool shouldReoptimizeFromLoopNow();
+#else // No JIT
+ void optimizeAfterWarmUp() { }
+ unsigned numberOfDFGCompiles() { return 0; }
#endif
-#if ENABLE(VALUE_PROFILER)
bool shouldOptimizeNow();
- void updateAllValueProfilePredictions(OperationInProgress = NoOperation);
- void updateAllArrayPredictions(OperationInProgress = NoOperation);
- void updateAllPredictions(OperationInProgress = NoOperation);
-#else
- bool shouldOptimizeNow() { return false; }
- void updateAllValueProfilePredictions(OperationInProgress = NoOperation) { }
- void updateAllArrayPredictions(OperationInProgress = NoOperation) { }
- void updateAllPredictions(OperationInProgress = NoOperation) { }
-#endif
-
-#if ENABLE(JIT)
- void reoptimize();
-#endif
+ void updateAllValueProfilePredictions();
+ void updateAllArrayPredictions();
+ void updateAllPredictions();
-#if ENABLE(VERBOSE_VALUE_PROFILE)
- void dumpValueProfiles();
-#endif
-
+ unsigned frameRegisterCount();
+ int stackPointerOffset();
+
+ bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
+
+ bool hasDebuggerRequests() const { return m_debuggerRequests; }
+ void* debuggerRequestsAddress() { return &m_debuggerRequests; }
+
+ void addBreakpoint(unsigned numBreakpoints);
+ void removeBreakpoint(unsigned numBreakpoints)
+ {
+ ASSERT(m_numBreakpoints >= numBreakpoints);
+ m_numBreakpoints -= numBreakpoints;
+ }
+
+ enum SteppingMode {
+ SteppingModeDisabled,
+ SteppingModeEnabled
+ };
+ void setSteppingMode(SteppingMode);
+
+ void clearDebuggerRequests()
+ {
+ m_steppingMode = SteppingModeDisabled;
+ m_numBreakpoints = 0;
+ }
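+    // Note that this leaves m_hasDebuggerStatement alone; presumably that bit
+    // is a static property of the bytecode rather than a revocable request.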
+
// FIXME: Make these remaining members private.
int m_numCalleeRegisters;
int m_numVars;
- bool m_isConstructor;
+ bool m_isConstructor : 1;
+
+ // This is intentionally public; it's the responsibility of anyone doing any
+ // of the following to hold the lock:
+ //
+ // - Modifying any inline cache in this code block.
+ //
+    // - Querying any inline cache in this code block, from a thread other than
+ // the main thread.
+ //
+ // Additionally, it's only legal to modify the inline cache on the main
+ // thread. This means that the main thread can query the inline cache without
+ // locking. This is crucial since executing the inline cache is effectively
+ // "querying" it.
+ //
+ // Another exception to the rules is that the GC can do whatever it wants
+ // without holding any locks, because the GC is guaranteed to wait until any
+ // concurrent compilation threads finish what they're doing.
+ mutable ConcurrentJITLock m_lock;
+
+ bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
+ bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
+
+ bool m_didFailFTLCompilation : 1;
+ bool m_hasBeenCompiledWithFTL : 1;
+
+    // Internal methods for use by validation code. They would be private if it
+    // weren't for the fact that we use them from anonymous namespaces.
+ void beginValidationDidFail();
+ NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
+
+ bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live.
protected:
-#if ENABLE(JIT)
- virtual bool jitCompileImpl(ExecState*) = 0;
- virtual void jettisonImpl() = 0;
-#endif
- virtual void visitWeakReferences(SlotVisitor&);
- virtual void finalizeUnconditionally();
+ virtual void visitWeakReferences(SlotVisitor&) override;
+ virtual void finalizeUnconditionally() override;
#if ENABLE(DFG_JIT)
void tallyFrequentExitSites();
#endif
private:
- friend class DFGCodeBlocks;
-
+ friend class CodeBlockSet;
+
+ CodeBlock* specialOSREntryBlockOrNull();
+
+ void noticeIncomingCall(ExecState* callerFrame);
+
double optimizationThresholdScalingFactor();
#if ENABLE(JIT)
ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
#endif
-#if ENABLE(VALUE_PROFILER)
- void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
-#endif
-
- void setIdentifiers(const Vector<Identifier>& identifiers)
- {
- RELEASE_ASSERT(m_identifiers.isEmpty());
- m_identifiers.appendVector(identifiers);
- }
+ void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
- void setConstantRegisters(const Vector<WriteBarrier<Unknown> >& constants)
+ void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
{
size_t count = constants.size();
m_constantRegisters.resize(count);
m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
}
- void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&);
+ void dumpBytecode(
+ PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
+ const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
- CString registerName(ExecState*, int r) const;
+ CString registerName(int r) const;
void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
- void printGetByIdCacheStatus(PrintStream&, ExecState*, int location);
+ void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
enum CacheDumpMode { DumpCaches, DontDumpCaches };
- void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode);
+ void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+ void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+ void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
+
void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
-#if ENABLE(VALUE_PROFILER)
void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
-#endif
-
- void visitStructures(SlotVisitor&, Instruction* vPC);
-#if ENABLE(DFG_JIT)
- bool shouldImmediatelyAssumeLivenessDuringScan()
- {
- // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
- // CodeBlocks don't need to be jettisoned when their weak references go
- // stale. So if a basline JIT CodeBlock gets scanned, we can assume that
- // this means that it's live.
- if (!m_dfgData)
- return true;
-
- // For simplicity, we don't attempt to jettison code blocks during GC if
- // they are executing. Instead we strongly mark their weak references to
- // allow them to continue to execute soundly.
- if (m_dfgData->mayBeExecuting)
- return true;
-
- if (Options::forceDFGCodeBlockLiveness())
- return true;
-
- return false;
- }
-#else
- bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
-#endif
-
- void performTracingFixpointIteration(SlotVisitor&);
+ bool shouldImmediatelyAssumeLivenessDuringScan();
+
+ void propagateTransitions(SlotVisitor&);
+ void determineLiveness(SlotVisitor&);
void stronglyVisitStrongReferences(SlotVisitor&);
void stronglyVisitWeakReferences(SlotVisitor&);
if (!m_rareData)
m_rareData = adoptPtr(new RareData);
}
-
+
#if ENABLE(JIT)
void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
#endif
WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
int m_numParameters;
+ union {
+ unsigned m_debuggerRequests;
+ struct {
+ unsigned m_hasDebuggerStatement : 1;
+ unsigned m_steppingMode : 1;
+ unsigned m_numBreakpoints : 30;
+ };
+ };
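+    // The union lets the JIT poll all debugger requests with one 32-bit load of
+    // m_debuggerRequests (via debuggerRequestsAddress()), while the bitfields
+    // give the runtime structured access to the same word.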
WriteBarrier<ScriptExecutable> m_ownerExecutable;
VM* m_vm;
RefCountedArray<Instruction> m_instructions;
- int m_thisRegister;
- int m_argumentsRegister;
- int m_activationRegister;
+ WriteBarrier<SymbolTable> m_symbolTable;
+ VirtualRegister m_thisRegister;
+ VirtualRegister m_argumentsRegister;
+ VirtualRegister m_activationRegister;
bool m_isStrictMode;
bool m_needsActivation;
+ bool m_mayBeExecuting;
+ uint8_t m_visitAggregateHasBeenCalled;
RefPtr<SourceProvider> m_source;
unsigned m_sourceOffset;
unsigned m_firstLineColumnOffset;
unsigned m_codeType;
-#if ENABLE(LLINT)
- SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
- SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
-#endif
+ Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
+ SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
+ RefPtr<JITCode> m_jitCode;
#if ENABLE(JIT)
- Vector<StructureStubInfo> m_structureStubInfos;
+ Bag<StructureStubInfo> m_stubInfos;
Vector<ByValInfo> m_byValInfos;
- Vector<CallLinkInfo> m_callLinkInfos;
- JITCode m_jitCode;
- MacroAssemblerCodePtr m_jitCodeWithArityCheck;
- SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
+ Bag<CallLinkInfo> m_callLinkInfos;
+ SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
#endif
-#if ENABLE(DFG_JIT) || ENABLE(LLINT)
OwnPtr<CompactJITCodeMap> m_jitCodeMap;
-#endif
#if ENABLE(DFG_JIT)
- struct WeakReferenceTransition {
- WeakReferenceTransition() { }
-
- WeakReferenceTransition(VM& vm, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
- : m_from(vm, owner, from)
- , m_to(vm, owner, to)
- {
- if (!!codeOrigin)
- m_codeOrigin.set(vm, owner, codeOrigin);
- }
-
- WriteBarrier<JSCell> m_codeOrigin;
- WriteBarrier<JSCell> m_from;
- WriteBarrier<JSCell> m_to;
- };
-
- struct DFGData {
- DFGData()
- : mayBeExecuting(false)
- , isJettisoned(false)
- {
- }
-
- Vector<DFG::OSREntryData> osrEntry;
- SegmentedVector<DFG::OSRExit, 8> osrExit;
- Vector<DFG::SpeculationRecovery> speculationRecovery;
- SegmentedVector<JumpReplacementWatchpoint, 1, 0> watchpoints;
- Vector<WeakReferenceTransition> transitions;
- Vector<WriteBarrier<JSCell> > weakReferences;
- DFG::VariableEventStream variableEventStream;
- DFG::MinifiedGraph minifiedDFG;
- RefPtr<Profiler::Compilation> compilation;
- bool mayBeExecuting;
- bool isJettisoned;
- bool livenessHasBeenProved; // Initialized and used on every GC.
- bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
- unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
- };
-
- OwnPtr<DFGData> m_dfgData;
-
// This is relevant to non-DFG code blocks that serve as the profiled code block
// for DFG code blocks.
DFG::ExitProfile m_exitProfile;
CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
-#if ENABLE(VALUE_PROFILER)
Vector<ValueProfile> m_argumentValueProfiles;
- SegmentedVector<ValueProfile, 8> m_valueProfiles;
+ Vector<ValueProfile> m_valueProfiles;
SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
- SegmentedVector<ArrayAllocationProfile, 8> m_arrayAllocationProfiles;
+ Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
ArrayProfileVector m_arrayProfiles;
-#endif
- SegmentedVector<ObjectAllocationProfile, 8> m_objectAllocationProfiles;
+ Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
// Constant Pool
- Vector<Identifier> m_identifiers;
COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
// TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
// it, so we're stuck with it for now.
- Vector<WriteBarrier<Unknown> > m_constantRegisters;
- Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
- Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
+ Vector<WriteBarrier<Unknown>> m_constantRegisters;
+ Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
+ Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;
- OwnPtr<CodeBlock> m_alternative;
-
- ExecutionCounter m_llintExecuteCounter;
-
- ExecutionCounter m_jitExecuteCounter;
+ RefPtr<CodeBlock> m_alternative;
+
+ BaselineExecutionCounter m_llintExecuteCounter;
+
+ BaselineExecutionCounter m_jitExecuteCounter;
int32_t m_totalJITExecutions;
uint32_t m_osrExitCounter;
uint16_t m_optimizationDelayCounter;
uint16_t m_reoptimizationRetryCounter;
+
+ mutable CodeBlockHash m_hash;
- Vector<ResolveOperations> m_resolveOperations;
- Vector<PutToBaseOperation, 1> m_putToBaseOperations;
+ std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
struct RareData {
WTF_MAKE_FAST_ALLOCATED;
Vector<HandlerInfo> m_exceptionHandlers;
// Buffers used for large array literals
- Vector<Vector<JSValue> > m_constantBuffers;
-
+ Vector<Vector<JSValue>> m_constantBuffers;
+
// Jump Tables
- Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
- Vector<SimpleJumpTable> m_characterSwitchJumpTables;
+ Vector<SimpleJumpTable> m_switchJumpTables;
Vector<StringJumpTable> m_stringSwitchJumpTables;
EvalCodeCache m_evalCodeCache;
-
-#if ENABLE(JIT)
- Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow> m_callReturnIndexVector;
-#endif
-#if ENABLE(DFG_JIT)
- SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
- Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow> m_codeOrigins;
-#endif
};
#if COMPILER(MSVC)
friend void WTF::deleteOwnedPtr<RareData>(RareData*);
#endif
OwnPtr<RareData> m_rareData;
#if ENABLE(JIT)
- DFG::CapabilityLevel m_canCompileWithDFGState;
+ DFG::CapabilityLevel m_capabilityLevelState;
#endif
};
class GlobalCodeBlock : public CodeBlock {
protected:
GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
+ : CodeBlock(CopyParsedBlock, other)
{
}
- GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
+ GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
{
}
};
class ProgramCodeBlock : public GlobalCodeBlock {
public:
ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
+ : GlobalCodeBlock(CopyParsedBlock, other)
{
}
- ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, 0, firstLineColumnOffset, alternative)
+ ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+ : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
{
}
#if ENABLE(JIT)
protected:
- virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
- virtual void jettisonImpl();
- virtual bool jitCompileImpl(ExecState*);
- virtual CodeBlock* replacement();
- virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+ virtual CodeBlock* replacement() override;
+ virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif
};
class EvalCodeBlock : public GlobalCodeBlock {
public:
EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
+ : GlobalCodeBlock(CopyParsedBlock, other)
{
}
- EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, 0, 1, alternative)
+ EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
+ : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
{
}
-
+
const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
-
+
#if ENABLE(JIT)
protected:
- virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
- virtual void jettisonImpl();
- virtual bool jitCompileImpl(ExecState*);
- virtual CodeBlock* replacement();
- virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+ virtual CodeBlock* replacement() override;
+ virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif
-
+
private:
UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
};
class FunctionCodeBlock : public CodeBlock {
public:
FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
+ : CodeBlock(CopyParsedBlock, other)
{
}
- FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative = nullptr)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
+ FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
{
}
-
+
#if ENABLE(JIT)
protected:
- virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
- virtual void jettisonImpl();
- virtual bool jitCompileImpl(ExecState*);
- virtual CodeBlock* replacement();
- virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+ virtual CodeBlock* replacement() override;
+ virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif
};
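
// Hierarchy recap of the concrete CodeBlock kinds declared above:
//
//     CodeBlock
//     |-- GlobalCodeBlock
//     |   |-- ProgramCodeBlock
//     |   `-- EvalCodeBlock
//     `-- FunctionCodeBlock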
inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
RELEASE_ASSERT(inlineCallFrame);
ExecutableBase* executable = inlineCallFrame->executable.get();
- RELEASE_ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
+ RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}
-
+
inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
{
if (codeOrigin.inlineCallFrame)
        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    return baselineCodeBlock;
}

inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
{
if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
return CallFrame::argumentOffset(argument);
-
+
const SlowArgument* slowArguments = symbolTable()->slowArguments();
if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
return CallFrame::argumentOffset(argument);
-
+
ASSERT(slowArguments[argument].status == SlowArgument::Captured);
return slowArguments[argument].index;
}
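
// Worked example of the remapping above (indices are illustrative): for a
// function whose second parameter is captured by a closure,
//     slowArguments[0].status == SlowArgument::Normal
//         => argumentIndexAfterCapture(0) == CallFrame::argumentOffset(0)
//     slowArguments[1].status == SlowArgument::Captured
//         => argumentIndexAfterCapture(1) == slowArguments[1].index,
//            i.e. the local slot the captured value was moved to.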
+inline bool CodeBlock::hasSlowArguments()
+{
+ return !!symbolTable()->slowArguments();
+}
+
inline Register& ExecState::r(int index)
{
CodeBlock* codeBlock = this->codeBlock();
// Constant registers live in the CodeBlock's constant pool, not on the stack.
if (codeBlock->isConstantRegisterIndex(index))
    return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
return this[index];
}
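
// Note on r(): indices at or above FirstConstantRegisterIndex name entries in
// the CodeBlock's constant pool rather than stack slots, which is why r() must
// consult the CodeBlock before indexing the frame. Sketch (illustrative):
//     // r(FirstConstantRegisterIndex + 2) aliases m_constantRegisters[2];
//     // any ordinary index reads this[index] straight off the frame.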
-#if ENABLE(DFG_JIT)
-inline bool ExecState::isInlineCallFrame()
-{
- if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
- return false;
- return isInlineCallFrameSlow();
-}
-#endif
-
inline JSValue ExecState::argumentAfterCapture(size_t argument)
{
if (argument >= argumentCount())
return jsUndefined();
-
+
if (!codeBlock())
return this[argumentOffset(argument)].jsValue();
-
+
return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
}
-#if ENABLE(DFG_JIT)
-inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
+inline void CodeBlockSet::mark(void* candidateCodeBlock)
{
// We have to check for 0 and -1 because those are used by the HashMap as markers.
uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
-
+
// This checks for both of those nasty cases in one go.
// 0 + 1 = 1
// -1 + 1 = 0
if (value + 1 <= 1)
return;
-
- HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
- if (iter == m_set.end())
+
+ CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
+ if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
return;
-
- (*iter)->m_dfgData->mayBeExecuting = true;
+
+ mark(codeBlock);
}
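
// Worked example of the marker check above, relying on unsigned wraparound:
//     value == 0             -> 0 + 1 == 1, 1 <= 1, filtered out
//     value == (uintptr_t)-1 -> wraps to 0, 0 <= 1, filtered out
//     a real pointer, e.g. 0x1000 -> 0x1001 > 1, falls through to the set check
// Unsigned overflow is well-defined in C++, so the single comparison is safe.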
-#endif
+
+inline void CodeBlockSet::mark(CodeBlock* codeBlock)
+{
+ if (!codeBlock)
+ return;
+ if (codeBlock->m_mayBeExecuting)
+ return;
+
+ codeBlock->m_mayBeExecuting = true;
+ // We might not have cleared the marks for this CodeBlock, but we need to visit it.
+ codeBlock->m_visitAggregateHasBeenCalled = false;
+#if ENABLE(GGC)
+ m_currentlyExecuting.append(codeBlock);
+#endif
+}
+
+template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
+{
+ switch (type()) {
+ case ProgramExecutableType: {
+ if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
+ codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+ break;
+ }
+
+ case EvalExecutableType: {
+ if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
+ codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+ break;
+ }
+
+ case FunctionExecutableType: {
+ Functor f(std::forward<Functor>(functor));
+ FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
+ if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
+ codeBlock->forEachRelatedCodeBlock(f);
+ if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
+ codeBlock->forEachRelatedCodeBlock(f);
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
} // namespace JSC
#endif // CodeBlock_h