/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
#ifndef CodeBlock_h
#define CodeBlock_h
+#include "BytecodeConventions.h"
+#include "CallLinkInfo.h"
+#include "CallReturnOffsetToBytecodeOffset.h"
+#include "CodeOrigin.h"
+#include "CodeType.h"
+#include "CompactJITCodeMap.h"
+#include "DFGCodeBlocks.h"
+#include "DFGExitProfile.h"
+#include "DFGOSREntry.h"
+#include "DFGOSRExit.h"
#include "EvalCodeCache.h"
+#include "ExecutionCounter.h"
+#include "ExpressionRangeInfo.h"
+#include "GlobalResolveInfo.h"
+#include "HandlerInfo.h"
+#include "MethodCallLinkInfo.h"
+#include "Options.h"
#include "Instruction.h"
#include "JITCode.h"
+#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
+#include "LLIntCallLinkInfo.h"
+#include "LazyOperandValueProfile.h"
+#include "LineInfo.h"
#include "Nodes.h"
-#include "RegExp.h"
+#include "RegExpObject.h"
+#include "StructureStubInfo.h"
#include "UString.h"
+#include "UnconditionalFinalizer.h"
+#include "ValueProfile.h"
#include <wtf/FastAllocBase.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
+#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
-
-#if ENABLE(JIT)
-#include "StructureStubInfo.h"
-#endif
-
-// Register numbers used in bytecode operations have different meaning accoring to their ranges:
-// 0x80000000-0xFFFFFFFF Negative indicies from the CallFrame pointer are entries in the call frame, see RegisterFile.h.
-// 0x00000000-0x3FFFFFFF Forwards indices from the CallFrame pointer are local vars and temporaries with the function's callframe.
-// 0x40000000-0x7FFFFFFF Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
-static const int FirstConstantRegisterIndex = 0x40000000;
namespace JSC {
+ class DFGCodeBlocks;
class ExecState;
+ class LLIntOffsetsExtractor;
- enum CodeType { GlobalCode, EvalCode, FunctionCode, NativeCode };
+ inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
- struct HandlerInfo {
- uint32_t start;
- uint32_t end;
- uint32_t target;
- uint32_t scopeDepth;
-#if ENABLE(JIT)
- CodeLocationLabel nativeCode;
-#endif
- };
-
- struct ExpressionRangeInfo {
- enum {
- MaxOffset = (1 << 7) - 1,
- MaxDivot = (1 << 25) - 1
- };
- uint32_t instructionOffset : 25;
- uint32_t divotPoint : 25;
- uint32_t startOffset : 7;
- uint32_t endOffset : 7;
- };
-
- struct LineInfo {
- uint32_t instructionOffset;
- int32_t lineNumber;
- };
-
- // Both op_construct and op_instanceof require a use of op_get_by_id to get
- // the prototype property from an object. The exception messages for exceptions
- // thrown by these instances op_get_by_id need to reflect this.
- struct GetByIdExceptionInfo {
- unsigned bytecodeOffset : 31;
- bool isOpConstruct : 1;
- };
-
-#if ENABLE(JIT)
- struct CallLinkInfo {
- CallLinkInfo()
- : callee(0)
- {
- }
-
- unsigned bytecodeIndex;
- CodeLocationNearCall callReturnLocation;
- CodeLocationDataLabelPtr hotPathBegin;
- CodeLocationNearCall hotPathOther;
- CodeBlock* ownerCodeBlock;
- CodeBlock* callee;
- unsigned position;
-
- void setUnlinked() { callee = 0; }
- bool isLinked() { return callee; }
- };
-
- struct MethodCallLinkInfo {
- MethodCallLinkInfo()
- : cachedStructure(0)
- , cachedPrototypeStructure(0)
- {
- }
-
- CodeLocationCall callReturnLocation;
- CodeLocationDataLabelPtr structureLabel;
- Structure* cachedStructure;
- Structure* cachedPrototypeStructure;
- };
+ class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
+ WTF_MAKE_FAST_ALLOCATED;
+ friend class JIT;
+ friend class LLIntOffsetsExtractor;
+ public:
+ enum CopyParsedBlockTag { CopyParsedBlock };
+ protected:
+ CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable*);
+
+ CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);
- struct FunctionRegisterInfo {
- FunctionRegisterInfo(unsigned bytecodeOffset, int functionRegisterIndex)
- : bytecodeOffset(bytecodeOffset)
- , functionRegisterIndex(functionRegisterIndex)
- {
- }
+ WriteBarrier<JSGlobalObject> m_globalObject;
+ Heap* m_heap;
- unsigned bytecodeOffset;
- int functionRegisterIndex;
- };
+ public:
+ JS_EXPORT_PRIVATE virtual ~CodeBlock();
+
+ int numParameters() const { return m_numParameters; }
+ void setNumParameters(int newValue);
+ void addParameter();
+
+ int* addressOfNumParameters() { return &m_numParameters; }
+ static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
- struct GlobalResolveInfo {
- GlobalResolveInfo(unsigned bytecodeOffset)
- : structure(0)
- , offset(0)
- , bytecodeOffset(bytecodeOffset)
+ CodeBlock* alternative() { return m_alternative.get(); }
+ PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
+ void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
+
+ CodeSpecializationKind specializationKind()
{
+ if (m_isConstructor)
+ return CodeForConstruct;
+ return CodeForCall;
}
-
- Structure* structure;
- unsigned offset;
- unsigned bytecodeOffset;
- };
-
- // This structure is used to map from a call return location
- // (given as an offset in bytes into the JIT code) back to
- // the bytecode index of the corresponding bytecode operation.
- // This is then used to look up the corresponding handler.
- struct CallReturnOffsetToBytecodeIndex {
- CallReturnOffsetToBytecodeIndex(unsigned callReturnOffset, unsigned bytecodeIndex)
- : callReturnOffset(callReturnOffset)
- , bytecodeIndex(bytecodeIndex)
+
+#if ENABLE(JIT)
+ CodeBlock* baselineVersion()
{
+ CodeBlock* result = replacement();
+ if (!result)
+ return 0; // This can happen if we're in the process of creating the baseline version.
+ while (result->alternative())
+ result = result->alternative();
+ ASSERT(result);
+ ASSERT(JITCode::isBaselineCode(result->getJITType()));
+ return result;
}
-
- unsigned callReturnOffset;
- unsigned bytecodeIndex;
- };
-
- // valueAtPosition helpers for the binaryChop algorithm below.
-
- inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo)
- {
- return structureStubInfo->callReturnLocation.executableAddress();
- }
-
- inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo)
- {
- return callLinkInfo->callReturnLocation.executableAddress();
- }
-
- inline void* getMethodCallLinkInfoReturnLocation(MethodCallLinkInfo* methodCallLinkInfo)
- {
- return methodCallLinkInfo->callReturnLocation.executableAddress();
- }
-
- inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeIndex* pc)
- {
- return pc->callReturnOffset;
- }
-
- // Binary chop algorithm, calls valueAtPosition on pre-sorted elements in array,
- // compares result with key (KeyTypes should be comparable with '--', '<', '>').
- // Optimized for cases where the array contains the key, checked by assertions.
- template<typename ArrayType, typename KeyType, KeyType(*valueAtPosition)(ArrayType*)>
- inline ArrayType* binaryChop(ArrayType* array, size_t size, KeyType key)
- {
- // The array must contain at least one element (pre-condition, array does conatin key).
- // If the array only contains one element, no need to do the comparison.
- while (size > 1) {
- // Pick an element to check, half way through the array, and read the value.
- int pos = (size - 1) >> 1;
- KeyType val = valueAtPosition(&array[pos]);
-
- // If the key matches, success!
- if (val == key)
- return &array[pos];
- // The item we are looking for is smaller than the item being check; reduce the value of 'size',
- // chopping off the right hand half of the array.
- else if (key < val)
- size = pos;
- // Discard all values in the left hand half of the array, up to and including the item at pos.
- else {
- size -= (pos + 1);
- array += (pos + 1);
- }
-
- // 'size' should never reach zero.
- ASSERT(size);
- }
-
- // If we reach this point we've chopped down to one element, no need to check it matches
- ASSERT(size == 1);
- ASSERT(key == valueAtPosition(&array[0]));
- return &array[0];
- }
-#endif
-
- class CodeBlock : public WTF::FastAllocBase {
- friend class JIT;
- public:
- CodeBlock(ScopeNode* ownerNode);
- CodeBlock(ScopeNode* ownerNode, CodeType, PassRefPtr<SourceProvider>, unsigned sourceOffset);
- ~CodeBlock();
-
- void mark();
- void refStructures(Instruction* vPC) const;
- void derefStructures(Instruction* vPC) const;
-#if ENABLE(JIT_OPTIMIZE_CALL)
- void unlinkCallers();
#endif
+
+ void visitAggregate(SlotVisitor&);
static void dumpStatistics();
-#if !defined(NDEBUG) || ENABLE_OPCODE_SAMPLING
void dump(ExecState*) const;
void printStructures(const Instruction*) const;
void printStructure(const char* name, const Instruction*, int operand) const;
-#endif
+
+ bool isStrictMode() const { return m_isStrictMode; }
inline bool isKnownNotImmediate(int index)
{
- if (index == m_thisRegister)
+ if (index == m_thisRegister && !m_isStrictMode)
return true;
if (isConstantRegisterIndex(index))
return getConstant(index).isCell();
return false;
}
HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
- int lineNumberForBytecodeOffset(CallFrame*, unsigned bytecodeOffset);
- int expressionRangeForBytecodeOffset(CallFrame*, unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
- bool getByIdExceptionInfoForBytecodeOffset(CallFrame*, unsigned bytecodeOffset, OpcodeID&);
+ int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
+ void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
#if ENABLE(JIT)
- void addCaller(CallLinkInfo* caller)
+
+ StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
{
- caller->callee = this;
- caller->position = m_linkedCallerList.size();
- m_linkedCallerList.append(caller);
+ return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
}
- void removeCaller(CallLinkInfo* caller)
+ StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
{
- unsigned pos = caller->position;
- unsigned lastPos = m_linkedCallerList.size() - 1;
+ return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
+ }
- if (pos != lastPos) {
- m_linkedCallerList[pos] = m_linkedCallerList[lastPos];
- m_linkedCallerList[pos]->position = pos;
- }
- m_linkedCallerList.shrink(lastPos);
+ CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
+ {
+ return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
+ }
+
+ CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
+ {
+ return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
}
- StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
+ MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
{
- return *(binaryChop<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
+ return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
}
- CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
+ MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex)
{
- return *(binaryChop<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
+ return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
}
- MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
+ unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
+
+ unsigned bytecodeOffsetForCallAtIndex(unsigned index)
+ {
+ if (!m_rareData)
+ return 1;
+ Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
+ if (!callIndices.size())
+ return 1;
+ ASSERT(index < m_rareData->m_callReturnIndexVector.size());
+ return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
+ }
+
+ void unlinkCalls();
+
+ bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
+
+ void linkIncomingCall(CallLinkInfo* incoming)
{
- return *(binaryChop<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
+ m_incomingCalls.push(incoming);
}
+#if ENABLE(LLINT)
+ void linkIncomingCall(LLIntCallLinkInfo* incoming)
+ {
+ m_incomingLLIntCalls.push(incoming);
+ }
+#endif // ENABLE(LLINT)
+
+ void unlinkIncomingCalls();
+#endif // ENABLE(JIT)
- unsigned getBytecodeIndex(CallFrame* callFrame, ReturnAddressPtr returnAddress)
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
+ void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
+ {
+ m_jitCodeMap = jitCodeMap;
+ }
+ CompactJITCodeMap* jitCodeMap()
+ {
+ return m_jitCodeMap.get();
+ }
+#endif
+
+#if ENABLE(DFG_JIT)
+ void createDFGDataIfNecessary()
+ {
+ if (!!m_dfgData)
+ return;
+
+ m_dfgData = adoptPtr(new DFGData);
+ }
+
+ DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
+ {
+ createDFGDataIfNecessary();
+ DFG::OSREntryData entry;
+ entry.m_bytecodeIndex = bytecodeIndex;
+ entry.m_machineCodeOffset = machineCodeOffset;
+ m_dfgData->osrEntry.append(entry);
+ return &m_dfgData->osrEntry.last();
+ }
+ unsigned numberOfDFGOSREntries() const
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->osrEntry.size();
+ }
+ DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
+ DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
+ {
+ return binarySearch<DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(), bytecodeIndex);
+ }
+
+ void appendOSRExit(const DFG::OSRExit& osrExit)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->osrExit.append(osrExit);
+ }
+
+ DFG::OSRExit& lastOSRExit()
+ {
+ return m_dfgData->osrExit.last();
+ }
+
+ void appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->speculationRecovery.append(recovery);
+ }
+
+ unsigned numberOfOSRExits()
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->osrExit.size();
+ }
+
+ unsigned numberOfSpeculationRecoveries()
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->speculationRecovery.size();
+ }
+
+ DFG::OSRExit& osrExit(unsigned index)
+ {
+ return m_dfgData->osrExit[index];
+ }
+
+ DFG::SpeculationRecovery& speculationRecovery(unsigned index)
+ {
+ return m_dfgData->speculationRecovery[index];
+ }
+
+ void appendWeakReference(JSCell* target)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
+ }
+
+ void shrinkWeakReferencesToFit()
+ {
+ if (!m_dfgData)
+ return;
+ m_dfgData->weakReferences.shrinkToFit();
+ }
+
+ void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
{
- reparseForExceptionInfoIfNecessary(callFrame);
- return binaryChop<CallReturnOffsetToBytecodeIndex, unsigned, getCallReturnOffset>(m_exceptionInfo->m_callReturnIndexVector.begin(), m_exceptionInfo->m_callReturnIndexVector.size(), ownerNode()->generatedJITCode().offsetOf(returnAddress.value()))->bytecodeIndex;
+ createDFGDataIfNecessary();
+ m_dfgData->transitions.append(
+ WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
}
- bool functionRegisterForBytecodeOffset(unsigned bytecodeOffset, int& functionRegisterIndex);
+ void shrinkWeakReferenceTransitionsToFit()
+ {
+ if (!m_dfgData)
+ return;
+ m_dfgData->transitions.shrinkToFit();
+ }
#endif
+ unsigned bytecodeOffset(Instruction* returnAddress)
+ {
+ ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
+ return returnAddress - instructions().begin();
+ }
+
void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
bool isNumericCompareFunction() { return m_isNumericCompareFunction; }
- Vector<Instruction>& instructions() { return m_instructions; }
-#ifndef NDEBUG
- void setInstructionCount(unsigned instructionCount) { m_instructionCount = instructionCount; }
-#endif
+ unsigned numberOfInstructions() const { return m_instructions.size(); }
+ RefCountedArray<Instruction>& instructions() { return m_instructions; }
+ const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
+
+ size_t predictedMachineCodeSize();
+
+ bool usesOpcode(OpcodeID);
+ unsigned instructionCount() { return m_instructions.size(); }
+
+#if ENABLE(JIT)
+ void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
+ {
+ m_jitCode = code;
+ m_jitCodeWithArityCheck = codeWithArityCheck;
+#if ENABLE(DFG_JIT)
+ if (m_jitCode.jitType() == JITCode::DFGJIT) {
+ createDFGDataIfNecessary();
+ m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
+ }
+#endif
+ }
+ JITCode& getJITCode() { return m_jitCode; }
+ MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
+ JITCode::JITType getJITType() { return m_jitCode.jitType(); }
+ ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
+ virtual void jettison() = 0;
+ enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
+ JITCompilationResult jitCompile(JSGlobalData& globalData)
+ {
+ if (getJITType() != JITCode::InterpreterThunk) {
+ ASSERT(getJITType() == JITCode::BaselineJIT);
+ return AlreadyCompiled;
+ }
#if ENABLE(JIT)
- JITCode& getJITCode() { return ownerNode()->generatedJITCode(); }
- void setJITCode(JITCode);
- ExecutablePool* executablePool() { return ownerNode()->getExecutablePool(); }
+ if (jitCompileImpl(globalData))
+ return CompiledSuccessfully;
+ return CouldNotCompile;
+#else
+ UNUSED_PARAM(globalData);
+ return CouldNotCompile;
+#endif
+ }
+ virtual CodeBlock* replacement() = 0;
+
+ enum CompileWithDFGState {
+ CompileWithDFGFalse,
+ CompileWithDFGTrue,
+ CompileWithDFGUnset
+ };
+
+ virtual bool canCompileWithDFGInternal() = 0;
+ bool canCompileWithDFG()
+ {
+ bool result = canCompileWithDFGInternal();
+ m_canCompileWithDFGState = result ? CompileWithDFGTrue : CompileWithDFGFalse;
+ return result;
+ }
+ CompileWithDFGState canCompileWithDFGState() { return m_canCompileWithDFGState; }
+
+ bool hasOptimizedReplacement()
+ {
+ ASSERT(JITCode::isBaselineCode(getJITType()));
+ bool result = replacement()->getJITType() > getJITType();
+#if !ASSERT_DISABLED
+ if (result)
+ ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
+ else {
+ ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
+ ASSERT(replacement() == this);
+ }
+#endif
+ return result;
+ }
+#else
+ JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
#endif
- ScopeNode* ownerNode() const { return m_ownerNode; }
+ ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
+ JSGlobalData* globalData() { return m_globalData; }
void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
int thisRegister() const { return m_thisRegister; }
bool needsFullScopeChain() const { return m_needsFullScopeChain; }
void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
bool usesEval() const { return m_usesEval; }
- void setUsesArguments(bool usesArguments) { m_usesArguments = usesArguments; }
- bool usesArguments() const { return m_usesArguments; }
+
+ void setArgumentsRegister(int argumentsRegister)
+ {
+ ASSERT(argumentsRegister != -1);
+ m_argumentsRegister = argumentsRegister;
+ ASSERT(usesArguments());
+ }
+ int argumentsRegister()
+ {
+ ASSERT(usesArguments());
+ return m_argumentsRegister;
+ }
+ void setActivationRegister(int activationRegister)
+ {
+ m_activationRegister = activationRegister;
+ }
+ int activationRegister()
+ {
+ ASSERT(needsFullScopeChain());
+ return m_activationRegister;
+ }
+ bool usesArguments() const { return m_argumentsRegister != -1; }
CodeType codeType() const { return m_codeType; }
- SourceProvider* source() const { ASSERT(m_codeType != NativeCode); return m_source.get(); }
- unsigned sourceOffset() const { ASSERT(m_codeType != NativeCode); return m_sourceOffset; }
+ SourceProvider* source() const { return m_source.get(); }
+ unsigned sourceOffset() const { return m_sourceOffset; }
size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
-#if !ENABLE(JIT)
- void addPropertyAccessInstruction(unsigned propertyAccessInstruction) { m_propertyAccessInstructions.append(propertyAccessInstruction); }
- void addGlobalResolveInstruction(unsigned globalResolveInstruction) { m_globalResolveInstructions.append(globalResolveInstruction); }
+ void createActivation(CallFrame*);
+
+ void clearEvalCache();
+
+ void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
+ {
+ m_propertyAccessInstructions.append(propertyAccessInstruction);
+ }
+ void addGlobalResolveInstruction(unsigned globalResolveInstruction)
+ {
+ m_globalResolveInstructions.append(globalResolveInstruction);
+ }
bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
-#else
+#if ENABLE(LLINT)
+ LLIntCallLinkInfo* addLLIntCallLinkInfo()
+ {
+ m_llintCallLinkInfos.append(LLIntCallLinkInfo());
+ return &m_llintCallLinkInfos.last();
+ }
+#endif
+#if ENABLE(JIT)
+ void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
- void addStructureStubInfo(const StructureStubInfo& stubInfo) { m_structureStubInfos.append(stubInfo); }
StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
- void addGlobalResolveInfo(unsigned globalResolveInstruction) { m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction)); }
+ void addGlobalResolveInfo(unsigned globalResolveInstruction)
+ {
+ m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
+ }
GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);
+ void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
- void addCallLinkInfo() { m_callLinkInfos.append(CallLinkInfo()); }
CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
- void addMethodCallLinkInfos(unsigned n) { m_methodCallLinkInfos.grow(n); }
+ void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
+ size_t numberOfMethodCallLinkInfos() { return m_methodCallLinkInfos.size(); }
+#endif
+
+#if ENABLE(VALUE_PROFILER)
+ unsigned numberOfArgumentValueProfiles()
+ {
+ ASSERT(m_numParameters >= 0);
+ ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
+ return m_argumentValueProfiles.size();
+ }
+ ValueProfile* valueProfileForArgument(unsigned argumentIndex)
+ {
+ ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
+ ASSERT(result->m_bytecodeOffset == -1);
+ return result;
+ }
+
+ ValueProfile* addValueProfile(int bytecodeOffset)
+ {
+ ASSERT(bytecodeOffset != -1);
+ ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset);
+ m_valueProfiles.append(ValueProfile(bytecodeOffset));
+ return &m_valueProfiles.last();
+ }
+ unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
+ ValueProfile* valueProfile(int index)
+ {
+ ValueProfile* result = &m_valueProfiles[index];
+ ASSERT(result->m_bytecodeOffset != -1);
+ return result;
+ }
+ ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
+ ASSERT(result->m_bytecodeOffset != -1);
+ ASSERT(instructions()[bytecodeOffset + opcodeLength(
+ m_globalData->interpreter->getOpcodeID(
+ instructions()[
+ bytecodeOffset].u.opcode)) - 1].u.profile == result);
+ return result;
+ }
+ PredictedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
+ {
+ return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
+ }
+
+ unsigned totalNumberOfValueProfiles()
+ {
+ return numberOfArgumentValueProfiles() + numberOfValueProfiles();
+ }
+ ValueProfile* getFromAllValueProfiles(unsigned index)
+ {
+ if (index < numberOfArgumentValueProfiles())
+ return valueProfileForArgument(index);
+ return valueProfile(index - numberOfArgumentValueProfiles());
+ }
+
+ RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
+ {
+ m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &m_rareCaseProfiles.last();
+ }
+ unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
+ RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
+ }
+
+ bool likelyToTakeSlowCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+ }
+
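+ // For illustration only (hypothetical numbers, not values taken from
+ // Options): with likelyToTakeSlowCaseMinimumCount = 100 and
+ // likelyToTakeSlowCaseThreshold = 0.5, a bytecode whose slow path ran
+ // 120 times across 200 entries into this CodeBlock trips both tests
+ // (120 >= 100 and 120 / 200 = 0.6 >= 0.5), so likelyToTakeSlowCase()
+ // returns true for it.
+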
+ bool couldTakeSlowCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
+ }
+
+ RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
+ {
+ m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &m_specialFastCaseProfiles.last();
+ }
+ unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
+ RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
+ RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
+ }
+
+ bool likelyToTakeSpecialFastCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+ }
+
+ bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = slowCaseCount - specialFastCaseCount;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+ }
+
+ bool likelyToTakeAnySlowCase(int bytecodeOffset)
+ {
+ if (!numberOfRareCaseProfiles())
+ return false;
+ unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = slowCaseCount + specialFastCaseCount;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+ }
+
+ unsigned executionEntryCount() const { return m_executionEntryCount; }
+#endif
- void addFunctionRegisterInfo(unsigned bytecodeOffset, int functionIndex) { createRareDataIfNecessary(); m_rareData->m_functionRegisterInfos.append(FunctionRegisterInfo(bytecodeOffset, functionIndex)); }
+ unsigned globalResolveInfoCount() const
+ {
+#if ENABLE(JIT)
+ if (m_globalData->canUseJIT())
+ return m_globalResolveInfos.size();
#endif
+ return 0;
+ }
// Exception handling support
void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
- bool hasExceptionInfo() const { return m_exceptionInfo; }
- void clearExceptionInfo() { m_exceptionInfo.clear(); }
+ void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
+ {
+ createRareDataIfNecessary();
+ m_rareData->m_expressionInfo.append(expressionInfo);
+ }
- void addExpressionInfo(const ExpressionRangeInfo& expressionInfo) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_expressionInfo.append(expressionInfo); }
- void addGetByIdExceptionInfo(const GetByIdExceptionInfo& info) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_getByIdExceptionInfo.append(info); }
+ void addLineInfo(unsigned bytecodeOffset, int lineNo)
+ {
+ createRareDataIfNecessary();
+ Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
+ if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
+ LineInfo info = { bytecodeOffset, lineNo };
+ lineInfo.append(info);
+ }
+ }
- size_t numberOfLineInfos() const { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.size(); }
- void addLineInfo(const LineInfo& lineInfo) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_lineInfo.append(lineInfo); }
- LineInfo& lastLineInfo() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.last(); }
+ bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
+ bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
+ // We only generate exception handling info if the user is debugging
+ // (and may want line number info), or if the function contains an exception handler.
+ bool needsCallReturnIndices()
+ {
+ return m_rareData &&
+ (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
+ }
#if ENABLE(JIT)
- Vector<CallReturnOffsetToBytecodeIndex>& callReturnIndexVector() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_callReturnIndexVector; }
+ Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_callReturnIndexVector;
+ }
+#endif
+
+#if ENABLE(DFG_JIT)
+ SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_inlineCallFrames;
+ }
+
+ Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_codeOrigins;
+ }
+
+ // Having code origins implies that there has been some inlining.
+ bool hasCodeOrigins()
+ {
+ return m_rareData && !!m_rareData->m_codeOrigins.size();
+ }
+
+ bool codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
+ {
+ if (!hasCodeOrigins())
+ return false;
+ unsigned offset = getJITCode().offsetOf(returnAddress.value());
+ CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
+ if (entry->callReturnOffset != offset)
+ return false;
+ codeOrigin = entry->codeOrigin;
+ return true;
+ }
+
+ CodeOrigin codeOrigin(unsigned index)
+ {
+ ASSERT(m_rareData);
+ return m_rareData->m_codeOrigins[index].codeOrigin;
+ }
+
+ bool addFrequentExitSite(const DFG::FrequentExitSite& site)
+ {
+ ASSERT(JITCode::isBaselineCode(getJITType()));
+ return m_exitProfile.add(site);
+ }
+
+ DFG::ExitProfile& exitProfile() { return m_exitProfile; }
+
+ CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
+ {
+ return m_lazyOperandValueProfiles;
+ }
#endif
// Constant Pool
Identifier& identifier(int index) { return m_identifiers[index]; }
size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
- void addConstantRegister(const Register& r) { return m_constantRegisters.append(r); }
- Register& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
- ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
- ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].jsValue(); }
+ unsigned addConstant(JSValue v)
+ {
+ unsigned result = m_constantRegisters.size();
+ m_constantRegisters.append(WriteBarrier<Unknown>());
+ m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
+ return result;
+ }
+ unsigned addOrFindConstant(JSValue);
+ WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
+ ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+ ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
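+ // For example: the first constant added has constant-pool index 0 and
+ // is referenced from bytecode as register index
+ // FirstConstantRegisterIndex + 0; constantRegister() and getConstant()
+ // subtract that bias to recover the pool index.
+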
- unsigned addFunctionExpression(FuncExprNode* n) { unsigned size = m_functionExpressions.size(); m_functionExpressions.append(n); return size; }
- FuncExprNode* functionExpression(int index) const { return m_functionExpressions[index].get(); }
+ unsigned addFunctionDecl(FunctionExecutable* n)
+ {
+ unsigned size = m_functionDecls.size();
+ m_functionDecls.append(WriteBarrier<FunctionExecutable>());
+ m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
+ return size;
+ }
+ FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
+ int numberOfFunctionDecls() { return m_functionDecls.size(); }
+ unsigned addFunctionExpr(FunctionExecutable* n)
+ {
+ unsigned size = m_functionExprs.size();
+ m_functionExprs.append(WriteBarrier<FunctionExecutable>());
+ m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
+ return size;
+ }
+ FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
- unsigned addFunction(FuncDeclNode* n) { createRareDataIfNecessary(); unsigned size = m_rareData->m_functions.size(); m_rareData->m_functions.append(n); return size; }
- FuncDeclNode* function(int index) const { ASSERT(m_rareData); return m_rareData->m_functions[index].get(); }
+ unsigned addRegExp(RegExp* r)
+ {
+ createRareDataIfNecessary();
+ unsigned size = m_rareData->m_regexps.size();
+ m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
+ return size;
+ }
+ unsigned numberOfRegExps() const
+ {
+ if (!m_rareData)
+ return 0;
+ return m_rareData->m_regexps.size();
+ }
+ RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
- bool hasFunctions() const { return m_functionExpressions.size() || (m_rareData && m_rareData->m_functions.size()); }
+ unsigned addConstantBuffer(unsigned length)
+ {
+ createRareDataIfNecessary();
+ unsigned size = m_rareData->m_constantBuffers.size();
+ m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
+ return size;
+ }
- unsigned addRegExp(RegExp* r) { createRareDataIfNecessary(); unsigned size = m_rareData->m_regexps.size(); m_rareData->m_regexps.append(r); return size; }
- RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
+ JSValue* constantBuffer(unsigned index)
+ {
+ ASSERT(m_rareData);
+ return m_rareData->m_constantBuffers[index].data();
+ }
+ JSGlobalObject* globalObject() { return m_globalObject.get(); }
+
+ JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return globalObject();
+ // FIXME: if we ever inline based on the executable rather than the function, this code will need to change.
+ return codeOrigin.inlineCallFrame->callee->scope()->globalObject.get();
+ }
// Jump Tables
StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
- SymbolTable& symbolTable() { return m_symbolTable; }
+ SymbolTable* symbolTable() { return m_symbolTable; }
+ SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }
- EvalCodeCache& evalCodeCache() { ASSERT(m_codeType != NativeCode); createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
+ EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
void shrinkToFit();
+
+ void copyPostParseDataFrom(CodeBlock* alternative);
+ void copyPostParseDataFromAlternative();
+
+ // Functions for controlling when JITting kicks in, in a mixed mode
+ // execution world.
+
+ bool checkIfJITThresholdReached()
+ {
+ return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
+ }
+
+ void dontJITAnytimeSoon()
+ {
+ m_llintExecuteCounter.deferIndefinitely();
+ }
+
+ void jitAfterWarmUp()
+ {
+ m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp, this);
+ }
+
+ void jitSoon()
+ {
+ m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon, this);
+ }
+
+ int32_t llintExecuteCounter() const
+ {
+ return m_llintExecuteCounter.m_counter;
+ }
+
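+ // A minimal sketch of intended use, assuming call sites that are not
+ // defined in this header: the constructor arms the counter with
+ // jitAfterWarmUp(), and the interpreter's entry and loop hooks then do
+ // something like
+ //
+ //     if (codeBlock->checkIfJITThresholdReached())
+ //         codeBlock->jitCompile(globalData); // tier up to the baseline JIT
+ //
+ // If the check fails, the counter has been re-armed and execution
+ // simply continues in the LLInt.
+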
+ // Functions for controlling when tiered compilation kicks in. This
+ // controls both when the optimizing compiler is invoked and when OSR
+ // entry happens. Two triggers exist: the loop trigger and the return
+ // trigger. In either case, when an addition to m_jitExecuteCounter
+ // causes it to become non-negative, the optimizing compiler is
+ // invoked. This includes a fast check to see if this CodeBlock has
+ // already been optimized (i.e. replacement() returns a CodeBlock
+ // that was optimized with a higher tier JIT than this one). In the
+ // case of the loop trigger, if the optimized compilation succeeds
+ // (or has already succeeded in the past) then OSR is attempted to
+ // redirect program flow into the optimized code.
+
+ // These functions are called from within the optimization triggers,
+ // and are used as a single point at which we define the heuristics
+ // for how much warm-up is mandated before the next optimization
+ // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
+ // as this is called from the CodeBlock constructor.
+
+ // When we observe a lot of speculation failures, we trigger a
+ // reoptimization. But each time, we increase the optimization trigger
+ // to avoid thrashing.
+ unsigned reoptimizationRetryCounter() const
+ {
+ ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
+ return m_reoptimizationRetryCounter;
+ }
+
+ void countReoptimization()
+ {
+ m_reoptimizationRetryCounter++;
+ if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
+ m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
+ }
+
+ int32_t counterValueForOptimizeAfterWarmUp()
+ {
+ return Options::thresholdForOptimizeAfterWarmUp << reoptimizationRetryCounter();
+ }
+
+ int32_t counterValueForOptimizeAfterLongWarmUp()
+ {
+ return Options::thresholdForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
+ }
+
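+ // The shift gives exponential back-off. Worked example (hypothetical
+ // threshold, not a value taken from Options): if
+ // thresholdForOptimizeAfterWarmUp is 1000, then after two
+ // reoptimizations the trigger does not fire until 1000 << 2 = 4000
+ // executions' worth of counting has accumulated.
+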
+ int32_t* addressOfJITExecuteCounter()
+ {
+ return &m_jitExecuteCounter.m_counter;
+ }
+
+ static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
+ static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
+ static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
+
+ int32_t jitExecuteCounter() const { return m_jitExecuteCounter.m_counter; }
+
+ unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
+
+ // Check if the optimization threshold has been reached, and if not,
+ // adjust the heuristics accordingly. Returns true if the threshold has
+ // been reached.
+ bool checkIfOptimizationThresholdReached()
+ {
+ return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
+ }
+
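+ // Sketch of a caller (assumed, not defined in this header): the
+ // counter slow path for a loop back edge in baseline code might read
+ //
+ //     if (codeBlock->checkIfOptimizationThresholdReached()) {
+ //         // Compile an optimized replacement, then attempt OSR entry
+ //         // at this bytecode offset.
+ //     }
+ //
+ // When the check returns false the heuristics have already been
+ // adjusted, so the caller just keeps running baseline code.
+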
+ // Call this to force the next optimization trigger to fire. This is
+ // rarely wise, since optimization triggers are typically more
+ // expensive than executing baseline code.
+ void optimizeNextInvocation()
+ {
+ m_jitExecuteCounter.setNewThreshold(0, this);
+ }
+
+ // Call this to prevent optimization from happening again. Note that
+ // optimization will still happen after roughly 2^29 invocations,
+ // so this is really meant to delay that as much as possible. This
+ // is called if optimization failed, and we expect it to fail in
+ // the future as well.
+ void dontOptimizeAnytimeSoon()
+ {
+ m_jitExecuteCounter.deferIndefinitely();
+ }
+
+ // Call this to reinitialize the counter to its starting state,
+ // forcing a warm-up to happen before the next optimization trigger
+ // fires. This is called in the CodeBlock constructor. It also
+ // makes sense to call this if an OSR exit occurred. Note that
+ // OSR exit code is generated code, so the value of the execute
+ // counter that this corresponds to is also available directly.
+ void optimizeAfterWarmUp()
+ {
+ m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
+ }
+
+ // Call this to force an optimization trigger to fire only after
+ // a lot of warm-up.
+ void optimizeAfterLongWarmUp()
+ {
+ m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
+ }
+
+ // Call this to cause an optimization trigger to fire soon, but
+ // not necessarily the next one. This makes sense if optimization
+ // succeeds. Successful optimization means that all calls are
+ // relinked to the optimized code, so this only affects call
+ // frames that are still executing this CodeBlock. The value here
+ // is tuned to strike a balance between the cost of OSR entry
+ // (which is too high to warrant making every loop back edge to
+ // trigger OSR immediately) and the cost of executing baseline
+ // code (which is high enough that we don't necessarily want to
+ // have a full warm-up). The intuition for calling this instead of
+ // optimizeNextInvocation() is for the case of recursive functions
+ // with loops. Consider that there may be N call frames of some
+ // recursive function, for a reasonably large value of N. The top
+ // one triggers optimization, and then returns, and then all of
+ // the others return. We don't want optimization to be triggered on
+ // each return, as that would be superfluous. It only makes sense
+ // to trigger optimization if one of those functions becomes hot
+ // in the baseline code.
+ void optimizeSoon()
+ {
+ m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this);
+ }
+
+ // The speculative JIT tracks its success rate, so that we can
+ // decide when to reoptimize. It's interesting to note that these
+ // counters may overflow without any protection. The success
+ // counter will overflow before the fail one does, because the
+ // fail one is used as a trigger to reoptimize. So the worst case
+ // is that the success counter overflows and we reoptimize without
+ // needing to. But this is harmless. If a method really did
+ // execute 2^32 times then compiling it again probably won't hurt
+ // anyone.
+
+ void countSpeculationSuccess()
+ {
+ m_speculativeSuccessCounter++;
+ }
+
+ void countSpeculationFailure()
+ {
+ m_speculativeFailCounter++;
+ }
+
+ uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
+ uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
+ uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; }
+
+ uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
+ uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
+ uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; }
+
+ static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
+ static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
+ static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); }
+
+#if ENABLE(JIT)
+ // The number of failures that triggers the use of the ratio.
+ unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
+ unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }
+ bool shouldReoptimizeNow()
+ {
+ return (Options::desiredSpeculativeSuccessFailRatio *
+ speculativeFailCounter() >= speculativeSuccessCounter()
+ && speculativeFailCounter() >= largeFailCountThreshold())
+ || forcedOSRExitCounter() >=
+ Options::forcedOSRExitCountForReoptimization;
+ }
+
+ bool shouldReoptimizeFromLoopNow()
+ {
+ return (Options::desiredSpeculativeSuccessFailRatio *
+ speculativeFailCounter() >= speculativeSuccessCounter()
+ && speculativeFailCounter() >= largeFailCountThresholdForLoop())
+ || forcedOSRExitCounter() >=
+ Options::forcedOSRExitCountForReoptimization;
+ }
+#endif
+
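+ // Worked example (hypothetical option values): with
+ // desiredSpeculativeSuccessFailRatio = 6, a block with 500 speculative
+ // failures against 2800 successes satisfies 6 * 500 >= 2800, so it
+ // reoptimizes once the failure count also clears
+ // largeFailCountThreshold(), or sooner if forcedOSRExitCounter()
+ // reaches Options::forcedOSRExitCountForReoptimization.
+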
+#if ENABLE(VALUE_PROFILER)
+ bool shouldOptimizeNow();
+#else
+ bool shouldOptimizeNow() { return false; }
+#endif
+
+#if ENABLE(JIT)
+ void reoptimize()
+ {
+ ASSERT(replacement() != this);
+ ASSERT(replacement()->alternative() == this);
+ replacement()->tallyFrequentExitSites();
+ replacement()->jettison();
+ countReoptimization();
+ optimizeAfterWarmUp();
+ }
+#endif
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+ void dumpValueProfiles();
+#endif
+
// FIXME: Make these remaining members private.
int m_numCalleeRegisters;
int m_numVars;
- int m_numParameters;
+ int m_numCapturedVars;
+ bool m_isConstructor;
+ protected:
+#if ENABLE(JIT)
+ virtual bool jitCompileImpl(JSGlobalData&) = 0;
+#endif
+ virtual void visitWeakReferences(SlotVisitor&);
+ virtual void finalizeUnconditionally();
+
private:
-#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING)
- void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;
+ friend class DFGCodeBlocks;
+
+#if ENABLE(DFG_JIT)
+ void tallyFrequentExitSites();
+#else
+ void tallyFrequentExitSites() { }
#endif
+
+ void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;
- void reparseForExceptionInfoIfNecessary(CallFrame*);
+ CString registerName(ExecState*, int r) const;
+ void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+ void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+ void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op) const;
+ void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+ void printCallOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+ void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+ void visitStructures(SlotVisitor&, Instruction* vPC) const;
+
+#if ENABLE(DFG_JIT)
+ bool shouldImmediatelyAssumeLivenessDuringScan()
+ {
+ // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
+ // CodeBlocks don't need to be jettisoned when their weak references go
+ // stale. So if a baseline JIT CodeBlock gets scanned, we can assume that
+ // this means that it's live.
+ if (!m_dfgData)
+ return true;
+
+ // For simplicity, we don't attempt to jettison code blocks during GC if
+ // they are executing. Instead we strongly mark their weak references to
+ // allow them to continue to execute soundly.
+ if (m_dfgData->mayBeExecuting)
+ return true;
+
+ return false;
+ }
+#else
+ bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
+#endif
+
+ void performTracingFixpointIteration(SlotVisitor&);
+
+ void stronglyVisitStrongReferences(SlotVisitor&);
+ void stronglyVisitWeakReferences(SlotVisitor&);
void createRareDataIfNecessary()
{
- ASSERT(m_codeType != NativeCode);
if (!m_rareData)
- m_rareData.set(new RareData);
+ m_rareData = adoptPtr(new RareData);
}
+
+ int m_numParameters;
- ScopeNode* m_ownerNode;
+ WriteBarrier<ScriptExecutable> m_ownerExecutable;
JSGlobalData* m_globalData;
- Vector<Instruction> m_instructions;
-#ifndef NDEBUG
- unsigned m_instructionCount;
-#endif
+ RefCountedArray<Instruction> m_instructions;
int m_thisRegister;
+ int m_argumentsRegister;
+ int m_activationRegister;
bool m_needsFullScopeChain;
bool m_usesEval;
- bool m_usesArguments;
bool m_isNumericCompareFunction;
+ bool m_isStrictMode;
CodeType m_codeType;
RefPtr<SourceProvider> m_source;
unsigned m_sourceOffset;
-#if !ENABLE(JIT)
Vector<unsigned> m_propertyAccessInstructions;
Vector<unsigned> m_globalResolveInstructions;
-#else
+#if ENABLE(LLINT)
+ SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
+ SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
+#endif
+#if ENABLE(JIT)
Vector<StructureStubInfo> m_structureStubInfos;
Vector<GlobalResolveInfo> m_globalResolveInfos;
Vector<CallLinkInfo> m_callLinkInfos;
Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
- Vector<CallLinkInfo*> m_linkedCallerList;
+ JITCode m_jitCode;
+ MacroAssemblerCodePtr m_jitCodeWithArityCheck;
+ SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
+#endif
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
+ OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+#endif
+#if ENABLE(DFG_JIT)
+ struct WeakReferenceTransition {
+ WeakReferenceTransition() { }
+
+ WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
+ : m_from(globalData, owner, from)
+ , m_to(globalData, owner, to)
+ {
+ if (!!codeOrigin)
+ m_codeOrigin.set(globalData, owner, codeOrigin);
+ }
+
+ WriteBarrier<JSCell> m_codeOrigin;
+ WriteBarrier<JSCell> m_from;
+ WriteBarrier<JSCell> m_to;
+ };
+
+ struct DFGData {
+ DFGData()
+ : mayBeExecuting(false)
+ , isJettisoned(false)
+ {
+ }
+
+ Vector<DFG::OSREntryData> osrEntry;
+ SegmentedVector<DFG::OSRExit, 8> osrExit;
+ Vector<DFG::SpeculationRecovery> speculationRecovery;
+ Vector<WeakReferenceTransition> transitions;
+ Vector<WriteBarrier<JSCell> > weakReferences;
+ bool mayBeExecuting;
+ bool isJettisoned;
+ bool livenessHasBeenProved; // Initialized and used on every GC.
+ bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
+ unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
+ };
+
+ OwnPtr<DFGData> m_dfgData;
+
+ // This is relevant to non-DFG code blocks that serve as the profiled code block
+ // for DFG code blocks.
+ DFG::ExitProfile m_exitProfile;
+ CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
+#endif
+#if ENABLE(VALUE_PROFILER)
+ Vector<ValueProfile> m_argumentValueProfiles;
+ SegmentedVector<ValueProfile, 8> m_valueProfiles;
+ SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
+ SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
+ unsigned m_executionEntryCount;
#endif
Vector<unsigned> m_jumpTargets;
+ Vector<unsigned> m_loopTargets;
// Constant Pool
Vector<Identifier> m_identifiers;
- Vector<Register> m_constantRegisters;
- Vector<RefPtr<FuncExprNode> > m_functionExpressions;
+ COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
+ Vector<WriteBarrier<Unknown> > m_constantRegisters;
+ Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
+ Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
- SymbolTable m_symbolTable;
-
- struct ExceptionInfo {
- Vector<ExpressionRangeInfo> m_expressionInfo;
- Vector<LineInfo> m_lineInfo;
- Vector<GetByIdExceptionInfo> m_getByIdExceptionInfo;
-
-#if ENABLE(JIT)
- Vector<CallReturnOffsetToBytecodeIndex> m_callReturnIndexVector;
-#endif
- };
- OwnPtr<ExceptionInfo> m_exceptionInfo;
+ SymbolTable* m_symbolTable;
+ OwnPtr<CodeBlock> m_alternative;
+
+ ExecutionCounter m_llintExecuteCounter;
+
+ ExecutionCounter m_jitExecuteCounter;
+ int32_t m_totalJITExecutions;
+ uint32_t m_speculativeSuccessCounter;
+ uint32_t m_speculativeFailCounter;
+ uint32_t m_forcedOSRExitCounter;
+ uint16_t m_optimizationDelayCounter;
+ uint16_t m_reoptimizationRetryCounter;
+
struct RareData {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
Vector<HandlerInfo> m_exceptionHandlers;
// Rare Constants
- Vector<RefPtr<FuncDeclNode> > m_functions;
- Vector<RefPtr<RegExp> > m_regexps;
+ Vector<WriteBarrier<RegExp> > m_regexps;
+ // Buffers used for large array literals
+ Vector<Vector<JSValue> > m_constantBuffers;
+
// Jump Tables
Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
Vector<SimpleJumpTable> m_characterSwitchJumpTables;
EvalCodeCache m_evalCodeCache;
+ // Expression info - present if debugging.
+ Vector<ExpressionRangeInfo> m_expressionInfo;
+ // Line info - present if profiling or debugging.
+ Vector<LineInfo> m_lineInfo;
#if ENABLE(JIT)
- Vector<FunctionRegisterInfo> m_functionRegisterInfos;
+ Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
+#endif
+#if ENABLE(DFG_JIT)
+ SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
+ Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
#endif
};
+#if COMPILER(MSVC)
+ friend void WTF::deleteOwnedPtr<RareData>(RareData*);
+#endif
OwnPtr<RareData> m_rareData;
+#if ENABLE(JIT)
+ CompileWithDFGState m_canCompileWithDFGState;
+#endif
};
// Program code is not marked by any function, so we make the global object
// responsible for marking it.
- class ProgramCodeBlock : public CodeBlock {
- public:
- ProgramCodeBlock(ScopeNode* ownerNode, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider)
- : CodeBlock(ownerNode, codeType, sourceProvider, 0)
- , m_globalObject(globalObject)
+ class GlobalCodeBlock : public CodeBlock {
+ protected:
+ GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
+ : CodeBlock(CopyParsedBlock, other, &m_unsharedSymbolTable)
+ , m_unsharedSymbolTable(other.m_unsharedSymbolTable)
{
- m_globalObject->codeBlocks().add(this);
}
-
- ~ProgramCodeBlock()
+
+ GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
+ : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative)
{
- if (m_globalObject)
- m_globalObject->codeBlocks().remove(this);
}
- void clearGlobalObject() { m_globalObject = 0; }
-
private:
- JSGlobalObject* m_globalObject; // For program and eval nodes, the global object that marks the constant pool.
+ SymbolTable m_unsharedSymbolTable;
+ };
+
+ class ProgramCodeBlock : public GlobalCodeBlock {
+ public:
+ ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
+ : GlobalCodeBlock(CopyParsedBlock, other)
+ {
+ }
+
+ ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
+ : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
+ {
+ }
+
+#if ENABLE(JIT)
+ protected:
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual void jettison();
+ virtual bool jitCompileImpl(JSGlobalData&);
+ virtual CodeBlock* replacement();
+ virtual bool canCompileWithDFGInternal();
+#endif
};
- class EvalCodeBlock : public ProgramCodeBlock {
+ class EvalCodeBlock : public GlobalCodeBlock {
public:
- EvalCodeBlock(ScopeNode* ownerNode, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth)
- : ProgramCodeBlock(ownerNode, EvalCode, globalObject, sourceProvider)
+ EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
+ : GlobalCodeBlock(CopyParsedBlock, other)
+ , m_baseScopeDepth(other.m_baseScopeDepth)
+ , m_variables(other.m_variables)
+ {
+ }
+
+ EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
+ : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
, m_baseScopeDepth(baseScopeDepth)
{
}
int baseScopeDepth() const { return m_baseScopeDepth; }
+ const Identifier& variable(unsigned index) { return m_variables[index]; }
+ unsigned numVariables() { return m_variables.size(); }
+ void adoptVariables(Vector<Identifier>& variables)
+ {
+ ASSERT(m_variables.isEmpty());
+ m_variables.swap(variables);
+ }
+
+#if ENABLE(JIT)
+ protected:
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual void jettison();
+ virtual bool jitCompileImpl(JSGlobalData&);
+ virtual CodeBlock* replacement();
+ virtual bool canCompileWithDFGInternal();
+#endif
+
private:
int m_baseScopeDepth;
+ Vector<Identifier> m_variables;
};
+ class FunctionCodeBlock : public CodeBlock {
+ public:
+ FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
+ : CodeBlock(CopyParsedBlock, other, other.sharedSymbolTable())
+ {
+ // The fact that we have to do this is yucky, but is necessary because of the
+ // class hierarchy issues described in the comment block for the main
+ // constructor, below.
+ sharedSymbolTable()->ref();
+ }
+
+ // Rather than using the usual RefCounted::create idiom for SharedSymbolTable we just use new
+ // as we need to initialise the CodeBlock before we can initialise any RefPtr to hold the shared
+ // symbol table, so we just pass it as a raw pointer with a ref count of 1. We then manually deref
+ // in the destructor.
+ FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr)
+ : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative)
+ {
+ }
+ ~FunctionCodeBlock()
+ {
+ sharedSymbolTable()->deref();
+ }
+
+#if ENABLE(JIT)
+ protected:
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual void jettison();
+ virtual bool jitCompileImpl(JSGlobalData&);
+ virtual CodeBlock* replacement();
+ virtual bool canCompileWithDFGInternal();
+#endif
+ };
+
+ inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+ {
+ ASSERT(inlineCallFrame);
+ ExecutableBase* executable = inlineCallFrame->executable.get();
+ ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
+ return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
+ }
+
+ inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+ {
+ if (codeOrigin.inlineCallFrame)
+ return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
+ return baselineCodeBlock;
+ }
+
+
inline Register& ExecState::r(int index)
{
CodeBlock* codeBlock = this->codeBlock();
if (codeBlock->isConstantRegisterIndex(index))
- return codeBlock->constantRegister(index);
+ return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
+ return this[index];
+ }
+
+ inline Register& ExecState::uncheckedR(int index)
+ {
+ ASSERT(index < FirstConstantRegisterIndex);
return this[index];
}
+#if ENABLE(DFG_JIT)
+ inline bool ExecState::isInlineCallFrame()
+ {
+ if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
+ return false;
+ return isInlineCallFrameSlow();
+ }
+#endif
+
+#if ENABLE(DFG_JIT)
+ inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
+ {
+ // We have to check for 0 and -1 because those are used by the HashMap as markers.
+ uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
+
+ // This checks for both of those nasty cases in one go.
+ // 0 + 1 = 1
+ // -1 + 1 = 0
+ if (value + 1 <= 1)
+ return;
+
+ HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
+ if (iter == m_set.end())
+ return;
+
+ (*iter)->m_dfgData->mayBeExecuting = true;
+ }
+#endif
+
} // namespace JSC
#endif // CodeBlock_h