X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/4e4e5a6f2694187498445a6ac6f1634ce8141119..4be4e30906bcb8ee30b4d189205cb70bad6707ce:/bytecode/CodeBlock.h diff --git a/bytecode/CodeBlock.h b/bytecode/CodeBlock.h index 63d9b8a..1329e23 100644 --- a/bytecode/CodeBlock.h +++ b/bytecode/CodeBlock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich * * Redistribution and use in source and binary forms, with or without @@ -30,637 +30,1452 @@ #ifndef CodeBlock_h #define CodeBlock_h +#include "ArrayProfile.h" +#include "ByValInfo.h" +#include "BytecodeConventions.h" +#include "CallLinkInfo.h" +#include "CallReturnOffsetToBytecodeOffset.h" +#include "CodeBlockHash.h" +#include "CodeOrigin.h" +#include "CodeType.h" +#include "CompactJITCodeMap.h" +#include "DFGCodeBlocks.h" +#include "DFGCommon.h" +#include "DFGExitProfile.h" +#include "DFGMinifiedGraph.h" +#include "DFGOSREntry.h" +#include "DFGOSRExit.h" +#include "DFGVariableEventStream.h" #include "EvalCodeCache.h" +#include "ExecutionCounter.h" +#include "ExpressionRangeInfo.h" +#include "HandlerInfo.h" +#include "ObjectAllocationProfile.h" +#include "Options.h" #include "Instruction.h" #include "JITCode.h" +#include "JITWriteBarrier.h" #include "JSGlobalObject.h" +#include "JumpReplacementWatchpoint.h" #include "JumpTable.h" -#include "Nodes.h" -#include "RegExp.h" -#include "UString.h" +#include "LLIntCallLinkInfo.h" +#include "LazyOperandValueProfile.h" +#include "LineInfo.h" +#include "ProfilerCompilation.h" +#include "RegExpObject.h" +#include "ResolveOperation.h" +#include "StructureStubInfo.h" +#include "UnconditionalFinalizer.h" +#include "ValueProfile.h" +#include "Watchpoint.h" +#include #include +#include +#include #include +#include #include - -#if ENABLE(JIT) -#include "StructureStubInfo.h" -#endif - -// Register numbers used in bytecode operations have different meaning accoring to their ranges: -// 0x80000000-0xFFFFFFFF Negative indicies from the CallFrame pointer are entries in the call frame, see RegisterFile.h. -// 0x00000000-0x3FFFFFFF Forwards indices from the CallFrame pointer are local vars and temporaries with the function's callframe. -// 0x40000000-0x7FFFFFFF Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock. 
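The three operand ranges described above are disjoint, so an index can be classified with simple comparisons. As a reading aid only (not part of the header), here is a self-contained sketch; classifyOperand and constantPoolSlot are invented names, while the constant and the subtraction mirror FirstConstantRegisterIndex declared below and CodeBlock::isConstantRegisterIndex()/getConstant() later in this file.

#include <cassert>

static const int FirstConstantRegisterIndex = 0x40000000;

enum class OperandKind { CallFrameEntry, LocalOrTemporary, ConstantPool };

inline OperandKind classifyOperand(int index)
{
    if (index < 0)                           // 0x80000000-0xFFFFFFFF viewed as a signed int
        return OperandKind::CallFrameEntry;
    if (index >= FirstConstantRegisterIndex) // 0x40000000-0x7FFFFFFF
        return OperandKind::ConstantPool;
    return OperandKind::LocalOrTemporary;    // 0x00000000-0x3FFFFFFF
}

// A constant-pool operand maps to a slot in CodeBlock::m_constantRegisters by
// subtracting the base of the range, exactly as getConstant() does.
inline int constantPoolSlot(int index)
{
    assert(index >= FirstConstantRegisterIndex);
    return index - FirstConstantRegisterIndex;
}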
-static const int FirstConstantRegisterIndex = 0x40000000; +#include namespace JSC { - enum HasSeenShouldRepatch { - hasSeenShouldRepatch - }; - - class ExecState; +class DFGCodeBlocks; +class ExecState; +class LLIntOffsetsExtractor; +class RepatchBuffer; - enum CodeType { GlobalCode, EvalCode, FunctionCode }; +inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; } - static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits::max(); } +static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits::max(); } - struct HandlerInfo { - uint32_t start; - uint32_t end; - uint32_t target; - uint32_t scopeDepth; -#if ENABLE(JIT) - CodeLocationLabel nativeCode; -#endif - }; - - struct ExpressionRangeInfo { - enum { - MaxOffset = (1 << 7) - 1, - MaxDivot = (1 << 25) - 1 - }; - uint32_t instructionOffset : 25; - uint32_t divotPoint : 25; - uint32_t startOffset : 7; - uint32_t endOffset : 7; - }; +class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester { + WTF_MAKE_FAST_ALLOCATED; + friend class JIT; + friend class LLIntOffsetsExtractor; +public: + enum CopyParsedBlockTag { CopyParsedBlock }; +protected: + CodeBlock(CopyParsedBlockTag, CodeBlock& other); + + CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSGlobalObject*, unsigned baseScopeDepth, PassRefPtr, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr alternative); - struct LineInfo { - uint32_t instructionOffset; - int32_t lineNumber; - }; + WriteBarrier m_globalObject; + Heap* m_heap; - // Both op_construct and op_instanceof require a use of op_get_by_id to get - // the prototype property from an object. The exception messages for exceptions - // thrown by these instances op_get_by_id need to reflect this. - struct GetByIdExceptionInfo { - unsigned bytecodeOffset : 31; - bool isOpConstruct : 1; - }; +public: + JS_EXPORT_PRIVATE virtual ~CodeBlock(); + + UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); } + + String inferredName() const; + CodeBlockHash hash() const; + String sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature. + String sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space. + void dumpAssumingJITType(PrintStream&, JITCode::JITType) const; + void dump(PrintStream&) const; + + int numParameters() const { return m_numParameters; } + void setNumParameters(int newValue); + + int* addressOfNumParameters() { return &m_numParameters; } + static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); } + CodeBlock* alternative() { return m_alternative.get(); } + PassOwnPtr releaseAlternative() { return m_alternative.release(); } + void setAlternative(PassOwnPtr alternative) { m_alternative = alternative; } + + CodeSpecializationKind specializationKind() const + { + return specializationFromIsConstruct(m_isConstructor); + } + #if ENABLE(JIT) - struct CallLinkInfo { - CallLinkInfo() - : callee(0) - , position(0) - , hasSeenShouldRepatch(0) - { - } + CodeBlock* baselineVersion() + { + CodeBlock* result = replacement(); + if (!result) + return 0; // This can happen if we're in the process of creating the baseline version. 
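        // replacement() is the CodeBlock currently installed for this executable
        // (the optimized one once tier-up has happened), and every optimized
        // CodeBlock keeps the block it was compiled from in alternative(), so
        // walking alternative() bottoms out at the baseline CodeBlock.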
+ while (result->alternative()) + result = result->alternative(); + ASSERT(result); + ASSERT(JITCode::isBaselineCode(result->getJITType())); + return result; + } +#else + CodeBlock* baselineVersion() + { + return this; + } +#endif - unsigned bytecodeIndex; - CodeLocationNearCall callReturnLocation; - CodeLocationDataLabelPtr hotPathBegin; - CodeLocationNearCall hotPathOther; - CodeBlock* ownerCodeBlock; - CodeBlock* callee; - unsigned position : 31; - unsigned hasSeenShouldRepatch : 1; - - void setUnlinked() { callee = 0; } - bool isLinked() { return callee; } + void visitAggregate(SlotVisitor&); - bool seenOnce() - { - return hasSeenShouldRepatch; - } + static void dumpStatistics(); - void setSeen() - { - hasSeenShouldRepatch = true; - } - }; + void dumpBytecode(PrintStream& = WTF::dataFile()); + void dumpBytecode(PrintStream&, unsigned bytecodeOffset); + void printStructures(PrintStream&, const Instruction*); + void printStructure(PrintStream&, const char* name, const Instruction*, int operand); - struct MethodCallLinkInfo { - MethodCallLinkInfo() - : cachedStructure(0) - , cachedPrototypeStructure(0) - { - } + bool isStrictMode() const { return m_isStrictMode; } - bool seenOnce() - { - ASSERT(!cachedStructure); - return cachedPrototypeStructure; - } + inline bool isKnownNotImmediate(int index) + { + if (index == m_thisRegister && !m_isStrictMode) + return true; - void setSeen() - { - ASSERT(!cachedStructure && !cachedPrototypeStructure); - // We use the values of cachedStructure & cachedPrototypeStructure to indicate the - // current state. - // - In the initial state, both are null. - // - Once this transition has been taken once, cachedStructure is - // null and cachedPrototypeStructure is set to a nun-null value. - // - Once the call is linked both structures are set to non-null values. - cachedPrototypeStructure = (Structure*)1; - } + if (isConstantRegisterIndex(index)) + return getConstant(index).isCell(); - CodeLocationCall callReturnLocation; - CodeLocationDataLabelPtr structureLabel; - Structure* cachedStructure; - Structure* cachedPrototypeStructure; - }; + return false; + } - struct FunctionRegisterInfo { - FunctionRegisterInfo(unsigned bytecodeOffset, int functionRegisterIndex) - : bytecodeOffset(bytecodeOffset) - , functionRegisterIndex(functionRegisterIndex) - { - } + ALWAYS_INLINE bool isTemporaryRegisterIndex(int index) + { + return index >= m_numVars; + } - unsigned bytecodeOffset; - int functionRegisterIndex; - }; + HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset); + unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset); + unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset); + void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, + int& startOffset, int& endOffset, unsigned& line, unsigned& column); - struct GlobalResolveInfo { - GlobalResolveInfo(unsigned bytecodeOffset) - : structure(0) - , offset(0) - , bytecodeOffset(bytecodeOffset) - { - } +#if ENABLE(JIT) - Structure* structure; - unsigned offset; - unsigned bytecodeOffset; - }; + StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress) + { + return *(binarySearch(m_structureStubInfos, m_structureStubInfos.size(), returnAddress.value(), getStructureStubInfoReturnLocation)); + } - // This structure is used to map from a call return location - // (given as an offset in bytes into the JIT code) back to - // the bytecode index of the corresponding bytecode operation. - // This is then used to look up the corresponding handler. 
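The pairs in that table are kept sorted by call-return offset, so the reverse mapping is a binary search keyed on the return address's offset into the JIT code (the header does this with the binaryChop() helper below over ExceptionInfo::m_callReturnIndexVector). A minimal sketch under those assumptions, with bytecodeIndexForReturnOffset as an invented name:

#include <algorithm>
#include <cassert>
#include <vector>

struct CallReturnOffsetToBytecodeIndex {
    unsigned callReturnOffset; // offset of the call's return point within the JIT code
    unsigned bytecodeIndex;    // bytecode offset of the corresponding operation
};

inline unsigned bytecodeIndexForReturnOffset(
    const std::vector<CallReturnOffsetToBytecodeIndex>& table, unsigned returnOffset)
{
    // The table is expected to contain the key, matching binaryChop()'s precondition.
    auto it = std::lower_bound(table.begin(), table.end(), returnOffset,
        [](const CallReturnOffsetToBytecodeIndex& entry, unsigned key) {
            return entry.callReturnOffset < key;
        });
    assert(it != table.end() && it->callReturnOffset == returnOffset);
    return it->bytecodeIndex;
}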
- struct CallReturnOffsetToBytecodeIndex { - CallReturnOffsetToBytecodeIndex(unsigned callReturnOffset, unsigned bytecodeIndex) - : callReturnOffset(callReturnOffset) - , bytecodeIndex(bytecodeIndex) - { - } + StructureStubInfo& getStubInfo(unsigned bytecodeIndex) + { + return *(binarySearch(m_structureStubInfos, m_structureStubInfos.size(), bytecodeIndex, getStructureStubInfoBytecodeIndex)); + } + + void resetStub(StructureStubInfo&); + + ByValInfo& getByValInfo(unsigned bytecodeIndex) + { + return *(binarySearch(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex)); + } - unsigned callReturnOffset; - unsigned bytecodeIndex; - }; + CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress) + { + return *(binarySearch(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation)); + } + + CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex) + { + ASSERT(JITCode::isBaselineCode(getJITType())); + return *(binarySearch(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex)); + } +#endif // ENABLE(JIT) - // valueAtPosition helpers for the binaryChop algorithm below. +#if ENABLE(LLINT) + Instruction* adjustPCIfAtCallSite(Instruction*); +#endif + unsigned bytecodeOffset(ExecState*, ReturnAddressPtr); - inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo) +#if ENABLE(JIT) + unsigned bytecodeOffsetForCallAtIndex(unsigned index) { - return structureStubInfo->callReturnLocation.executableAddress(); + if (!m_rareData) + return 1; + Vector& callIndices = m_rareData->m_callReturnIndexVector; + if (!callIndices.size()) + return 1; + // FIXME: Fix places in DFG that call out to C that don't set the CodeOrigin. https://bugs.webkit.org/show_bug.cgi?id=118315 + ASSERT(index < m_rareData->m_callReturnIndexVector.size()); + if (index >= m_rareData->m_callReturnIndexVector.size()) + return 1; + return m_rareData->m_callReturnIndexVector[index].bytecodeOffset; } - inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo) + void unlinkCalls(); + + bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); } + + void linkIncomingCall(CallLinkInfo* incoming) { - return callLinkInfo->callReturnLocation.executableAddress(); + m_incomingCalls.push(incoming); } - - inline void* getMethodCallLinkInfoReturnLocation(MethodCallLinkInfo* methodCallLinkInfo) + + bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming) { - return methodCallLinkInfo->callReturnLocation.executableAddress(); + return m_incomingCalls.isOnList(incoming); } +#endif // ENABLE(JIT) - inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeIndex* pc) +#if ENABLE(LLINT) + void linkIncomingCall(LLIntCallLinkInfo* incoming) { - return pc->callReturnOffset; + m_incomingLLIntCalls.push(incoming); } +#endif // ENABLE(LLINT) + + void unlinkIncomingCalls(); - // Binary chop algorithm, calls valueAtPosition on pre-sorted elements in array, - // compares result with key (KeyTypes should be comparable with '--', '<', '>'). - // Optimized for cases where the array contains the key, checked by assertions. - template - inline ArrayType* binaryChop(ArrayType* array, size_t size, KeyType key) +#if ENABLE(DFG_JIT) || ENABLE(LLINT) + void setJITCodeMap(PassOwnPtr jitCodeMap) + { + m_jitCodeMap = jitCodeMap; + } + CompactJITCodeMap* jitCodeMap() { - // The array must contain at least one element (pre-condition, array does conatin key). 
- // If the array only contains one element, no need to do the comparison. - while (size > 1) { - // Pick an element to check, half way through the array, and read the value. - int pos = (size - 1) >> 1; - KeyType val = valueAtPosition(&array[pos]); + return m_jitCodeMap.get(); + } +#endif + +#if ENABLE(DFG_JIT) + void createDFGDataIfNecessary() + { + if (!!m_dfgData) + return; - // If the key matches, success! - if (val == key) - return &array[pos]; - // The item we are looking for is smaller than the item being check; reduce the value of 'size', - // chopping off the right hand half of the array. - else if (key < val) - size = pos; - // Discard all values in the left hand half of the array, up to and including the item at pos. - else { - size -= (pos + 1); - array += (pos + 1); - } - - // 'size' should never reach zero. - ASSERT(size); - } + m_dfgData = adoptPtr(new DFGData); + } + + void saveCompilation(PassRefPtr compilation) + { + createDFGDataIfNecessary(); + m_dfgData->compilation = compilation; + } - // If we reach this point we've chopped down to one element, no need to check it matches - ASSERT(size == 1); - ASSERT(key == valueAtPosition(&array[0])); - return &array[0]; + Profiler::Compilation* compilation() + { + if (!m_dfgData) + return 0; + return m_dfgData->compilation.get(); + } + + DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset) + { + createDFGDataIfNecessary(); + DFG::OSREntryData entry; + entry.m_bytecodeIndex = bytecodeIndex; + entry.m_machineCodeOffset = machineCodeOffset; + m_dfgData->osrEntry.append(entry); + return &m_dfgData->osrEntry.last(); + } + unsigned numberOfDFGOSREntries() const + { + if (!m_dfgData) + return 0; + return m_dfgData->osrEntry.size(); + } + DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; } + DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex) + { + if (!m_dfgData) + return 0; + return tryBinarySearch( + m_dfgData->osrEntry, m_dfgData->osrEntry.size(), bytecodeIndex, + DFG::getOSREntryDataBytecodeIndex); + } + + unsigned appendOSRExit(const DFG::OSRExit& osrExit) + { + createDFGDataIfNecessary(); + unsigned result = m_dfgData->osrExit.size(); + m_dfgData->osrExit.append(osrExit); + return result; + } + + DFG::OSRExit& lastOSRExit() + { + return m_dfgData->osrExit.last(); + } + + unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery) + { + createDFGDataIfNecessary(); + unsigned result = m_dfgData->speculationRecovery.size(); + m_dfgData->speculationRecovery.append(recovery); + return result; + } + + unsigned appendWatchpoint(const JumpReplacementWatchpoint& watchpoint) + { + createDFGDataIfNecessary(); + unsigned result = m_dfgData->watchpoints.size(); + m_dfgData->watchpoints.append(watchpoint); + return result; + } + + unsigned numberOfOSRExits() + { + if (!m_dfgData) + return 0; + return m_dfgData->osrExit.size(); + } + + unsigned numberOfSpeculationRecoveries() + { + if (!m_dfgData) + return 0; + return m_dfgData->speculationRecovery.size(); + } + + unsigned numberOfWatchpoints() + { + if (!m_dfgData) + return 0; + return m_dfgData->watchpoints.size(); + } + + DFG::OSRExit& osrExit(unsigned index) + { + return m_dfgData->osrExit[index]; + } + + DFG::SpeculationRecovery& speculationRecovery(unsigned index) + { + return m_dfgData->speculationRecovery[index]; + } + + JumpReplacementWatchpoint& watchpoint(unsigned index) + { + return m_dfgData->watchpoints[index]; + } + + void appendWeakReference(JSCell* target) + { + 
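        // Optimized (DFG) code bakes direct references to heap cells it has
        // speculated on into the generated code; recording them here as weak
        // references lets the GC notice when one of them dies and jettison this
        // CodeBlock instead of keeping stale code alive.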
createDFGDataIfNecessary(); + m_dfgData->weakReferences.append(WriteBarrier(*vm(), ownerExecutable(), target)); + } + + void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to) + { + createDFGDataIfNecessary(); + m_dfgData->transitions.append( + WeakReferenceTransition(*vm(), ownerExecutable(), codeOrigin, from, to)); + } + + DFG::MinifiedGraph& minifiedDFG() + { + createDFGDataIfNecessary(); + return m_dfgData->minifiedDFG; + } + + DFG::VariableEventStream& variableEventStream() + { + createDFGDataIfNecessary(); + return m_dfgData->variableEventStream; } #endif - struct ExceptionInfo : FastAllocBase { - Vector m_expressionInfo; - Vector m_lineInfo; - Vector m_getByIdExceptionInfo; - -#if ENABLE(JIT) - Vector m_callReturnIndexVector; -#endif - }; - - class CodeBlock : public FastAllocBase { - friend class JIT; - protected: - CodeBlock(ScriptExecutable* ownerExecutable, CodeType, PassRefPtr, unsigned sourceOffset, SymbolTable* symbolTable); - public: - virtual ~CodeBlock(); - - void markAggregate(MarkStack&); - void refStructures(Instruction* vPC) const; - void derefStructures(Instruction* vPC) const; -#if ENABLE(JIT_OPTIMIZE_CALL) - void unlinkCallers(); -#endif - - static void dumpStatistics(); - -#if !defined(NDEBUG) || ENABLE_OPCODE_SAMPLING - void dump(ExecState*) const; - void printStructures(const Instruction*) const; - void printStructure(const char* name, const Instruction*, int operand) const; -#endif - - inline bool isKnownNotImmediate(int index) - { - if (index == m_thisRegister) - return true; + unsigned bytecodeOffset(Instruction* returnAddress) + { + RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end()); + return static_cast(returnAddress) - instructions().begin(); + } - if (isConstantRegisterIndex(index)) - return getConstant(index).isCell(); + bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); } - return false; - } + unsigned numberOfInstructions() const { return m_instructions.size(); } + RefCountedArray& instructions() { return m_instructions; } + const RefCountedArray& instructions() const { return m_instructions; } + + size_t predictedMachineCodeSize(); + + bool usesOpcode(OpcodeID); - ALWAYS_INLINE bool isTemporaryRegisterIndex(int index) - { - return index >= m_numVars; - } + unsigned instructionCount() { return m_instructions.size(); } - HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset); - int lineNumberForBytecodeOffset(CallFrame*, unsigned bytecodeOffset); - int expressionRangeForBytecodeOffset(CallFrame*, unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset); - bool getByIdExceptionInfoForBytecodeOffset(CallFrame*, unsigned bytecodeOffset, OpcodeID&); + int argumentIndexAfterCapture(size_t argument); #if ENABLE(JIT) - void addCaller(CallLinkInfo* caller) - { - caller->callee = this; - caller->position = m_linkedCallerList.size(); - m_linkedCallerList.append(caller); - } - - void removeCaller(CallLinkInfo* caller) - { - unsigned pos = caller->position; - unsigned lastPos = m_linkedCallerList.size() - 1; - - if (pos != lastPos) { - m_linkedCallerList[pos] = m_linkedCallerList[lastPos]; - m_linkedCallerList[pos]->position = pos; - } - m_linkedCallerList.shrink(lastPos); - } - - StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress) - { - return *(binaryChop(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value())); + void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck) + { + 
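        // Beyond storing the code, a DFG CodeBlock registers itself in the heap's
        // DFGCodeBlocks set (below) so the collector can find it during GC and
        // decide whether it may be jettisoned; see
        // shouldImmediatelyAssumeLivenessDuringScan() and DFGData later in this file.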
m_jitCode = code; + m_jitCodeWithArityCheck = codeWithArityCheck; +#if ENABLE(DFG_JIT) + if (m_jitCode.jitType() == JITCode::DFGJIT) { + createDFGDataIfNecessary(); + m_vm->heap.m_dfgCodeBlocks.m_set.add(this); } - - CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress) - { - return *(binaryChop(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value())); +#endif + } + JITCode& getJITCode() { return m_jitCode; } + MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; } + JITCode::JITType getJITType() const { return m_jitCode.jitType(); } + ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); } + virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex) = 0; + void jettison(); + enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully }; + JITCompilationResult jitCompile(ExecState* exec) + { + if (getJITType() != JITCode::InterpreterThunk) { + ASSERT(getJITType() == JITCode::BaselineJIT); + return AlreadyCompiled; } +#if ENABLE(JIT) + if (jitCompileImpl(exec)) + return CompiledSuccessfully; + return CouldNotCompile; +#else + UNUSED_PARAM(exec); + return CouldNotCompile; +#endif + } + virtual CodeBlock* replacement() = 0; - MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress) - { - return *(binaryChop(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value())); - } + virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0; + DFG::CapabilityLevel canCompileWithDFG() + { + DFG::CapabilityLevel result = canCompileWithDFGInternal(); + m_canCompileWithDFGState = result; + return result; + } + DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; } - unsigned getBytecodeIndex(CallFrame* callFrame, ReturnAddressPtr returnAddress) - { - reparseForExceptionInfoIfNecessary(callFrame); - return binaryChop(callReturnIndexVector().begin(), callReturnIndexVector().size(), ownerExecutable()->generatedJITCode().offsetOf(returnAddress.value()))->bytecodeIndex; + bool hasOptimizedReplacement() + { + ASSERT(JITCode::isBaselineCode(getJITType())); + bool result = replacement()->getJITType() > getJITType(); +#if !ASSERT_DISABLED + if (result) + ASSERT(replacement()->getJITType() == JITCode::DFGJIT); + else { + ASSERT(JITCode::isBaselineCode(replacement()->getJITType())); + ASSERT(replacement() == this); } - - bool functionRegisterForBytecodeOffset(unsigned bytecodeOffset, int& functionRegisterIndex); #endif -#if ENABLE(INTERPRETER) - unsigned bytecodeOffset(CallFrame*, Instruction* returnAddress) - { - return static_cast(returnAddress) - instructions().begin(); - } + return result; + } +#else + JITCode::JITType getJITType() const { return JITCode::BaselineJIT; } #endif - void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; } - bool isNumericCompareFunction() { return m_isNumericCompareFunction; } + ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); } - Vector& instructions() { return m_instructions; } - void discardBytecode() { m_instructions.clear(); } + void setVM(VM* vm) { m_vm = vm; } + VM* vm() { return m_vm; } -#ifndef NDEBUG - unsigned instructionCount() { return m_instructionCount; } - void setInstructionCount(unsigned instructionCount) { m_instructionCount = instructionCount; } -#endif + void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; } + int thisRegister() const { return 
m_thisRegister; } -#if ENABLE(JIT) - JITCode& getJITCode() { return ownerExecutable()->generatedJITCode(); } - ExecutablePool* executablePool() { return ownerExecutable()->getExecutablePool(); } -#endif + bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); } + bool usesEval() const { return m_unlinkedCode->usesEval(); } + + void setArgumentsRegister(int argumentsRegister) + { + ASSERT(argumentsRegister != -1); + m_argumentsRegister = argumentsRegister; + ASSERT(usesArguments()); + } + int argumentsRegister() const + { + ASSERT(usesArguments()); + return m_argumentsRegister; + } + int uncheckedArgumentsRegister() + { + if (!usesArguments()) + return InvalidVirtualRegister; + return argumentsRegister(); + } + void setActivationRegister(int activationRegister) + { + m_activationRegister = activationRegister; + } + int activationRegister() const + { + ASSERT(needsFullScopeChain()); + return m_activationRegister; + } + int uncheckedActivationRegister() + { + if (!needsFullScopeChain()) + return InvalidVirtualRegister; + return activationRegister(); + } + bool usesArguments() const { return m_argumentsRegister != -1; } + + bool needsActivation() const + { + return needsFullScopeChain() && codeType() != GlobalCode; + } - ScriptExecutable* ownerExecutable() const { return m_ownerExecutable; } + bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const + { + if (operandIsArgument(operand)) + return operandToArgument(operand) && usesArguments(); - void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; } + if (inlineCallFrame) + return inlineCallFrame->capturedVars.get(operand); - void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; } - int thisRegister() const { return m_thisRegister; } + // The activation object isn't in the captured region, but it's "captured" + // in the sense that stores to its location can be observed indirectly. + if (needsActivation() && operand == activationRegister()) + return true; - void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; } - bool needsFullScopeChain() const { return m_needsFullScopeChain; } - void setUsesEval(bool usesEval) { m_usesEval = usesEval; } - bool usesEval() const { return m_usesEval; } - void setUsesArguments(bool usesArguments) { m_usesArguments = usesArguments; } - bool usesArguments() const { return m_usesArguments; } + // Ditto for the arguments object. + if (usesArguments() && operand == argumentsRegister()) + return true; - CodeType codeType() const { return m_codeType; } + // Ditto for the arguments object. 
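        // (The "unmodified" arguments register is the slot immediately before
        // argumentsRegister(); see unmodifiedArgumentsRegister() near the top of
        // this file.)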
+ if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister())) + return true; - SourceProvider* source() const { return m_source.get(); } - unsigned sourceOffset() const { return m_sourceOffset; } + // We're in global code so there are no locals to capture + if (!symbolTable()) + return false; - size_t numberOfJumpTargets() const { return m_jumpTargets.size(); } - void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); } - unsigned jumpTarget(int index) const { return m_jumpTargets[index]; } - unsigned lastJumpTarget() const { return m_jumpTargets.last(); } + return operand >= symbolTable()->captureStart() + && operand < symbolTable()->captureEnd(); + } -#if ENABLE(INTERPRETER) - void addPropertyAccessInstruction(unsigned propertyAccessInstruction) { m_propertyAccessInstructions.append(propertyAccessInstruction); } - void addGlobalResolveInstruction(unsigned globalResolveInstruction) { m_globalResolveInstructions.append(globalResolveInstruction); } - bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset); -#endif -#if ENABLE(JIT) - size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); } - void addStructureStubInfo(const StructureStubInfo& stubInfo) { m_structureStubInfos.append(stubInfo); } - StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; } + CodeType codeType() const { return m_unlinkedCode->codeType(); } - void addGlobalResolveInfo(unsigned globalResolveInstruction) { m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction)); } - GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; } - bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset); + SourceProvider* source() const { return m_source.get(); } + unsigned sourceOffset() const { return m_sourceOffset; } + unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; } - size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); } - void addCallLinkInfo() { m_callLinkInfos.append(CallLinkInfo()); } - CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; } + size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); } + unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); } - void addMethodCallLinkInfos(unsigned n) { m_methodCallLinkInfos.grow(n); } - MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; } + void createActivation(CallFrame*); - void addFunctionRegisterInfo(unsigned bytecodeOffset, int functionIndex) { createRareDataIfNecessary(); m_rareData->m_functionRegisterInfos.append(FunctionRegisterInfo(bytecodeOffset, functionIndex)); } + void clearEvalCache(); + + String nameForRegister(int registerNumber); + +#if ENABLE(JIT) + void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); } + size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); } + StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; } + + void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); } + size_t numberOfByValInfos() const { return m_byValInfos.size(); } + ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; } + + void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); } + size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); } + CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; } #endif + 
+#if ENABLE(VALUE_PROFILER) + unsigned numberOfArgumentValueProfiles() + { + ASSERT(m_numParameters >= 0); + ASSERT(m_argumentValueProfiles.size() == static_cast(m_numParameters)); + return m_argumentValueProfiles.size(); + } + ValueProfile* valueProfileForArgument(unsigned argumentIndex) + { + ValueProfile* result = &m_argumentValueProfiles[argumentIndex]; + ASSERT(result->m_bytecodeOffset == -1); + return result; + } - // Exception handling support + unsigned numberOfValueProfiles() { return m_valueProfiles.size(); } + ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; } + ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset) + { + ValueProfile* result = binarySearch( + m_valueProfiles, m_valueProfiles.size(), bytecodeOffset, + getValueProfileBytecodeOffset); + ASSERT(result->m_bytecodeOffset != -1); + ASSERT(instructions()[bytecodeOffset + opcodeLength( + m_vm->interpreter->getOpcodeID( + instructions()[ + bytecodeOffset].u.opcode)) - 1].u.profile == result); + return result; + } + SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset) + { + return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(); + } + + unsigned totalNumberOfValueProfiles() + { + return numberOfArgumentValueProfiles() + numberOfValueProfiles(); + } + ValueProfile* getFromAllValueProfiles(unsigned index) + { + if (index < numberOfArgumentValueProfiles()) + return valueProfileForArgument(index); + return valueProfile(index - numberOfArgumentValueProfiles()); + } + + RareCaseProfile* addRareCaseProfile(int bytecodeOffset) + { + m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset)); + return &m_rareCaseProfiles.last(); + } + unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); } + RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; } + RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset) + { + return tryBinarySearch( + m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset, + getRareCaseProfileBytecodeOffset); + } + + bool likelyToTakeSlowCase(int bytecodeOffset) + { + if (!numberOfRareCaseProfiles()) + return false; + unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } + + bool couldTakeSlowCase(int bytecodeOffset) + { + if (!numberOfRareCaseProfiles()) + return false; + unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return value >= Options::couldTakeSlowCaseMinimumCount(); + } + + RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset) + { + m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset)); + return &m_specialFastCaseProfiles.last(); + } + unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); } + RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; } + RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset) + { + return tryBinarySearch( + m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset, + getRareCaseProfileBytecodeOffset); + } + + bool likelyToTakeSpecialFastCase(int bytecodeOffset) + { + if (!numberOfRareCaseProfiles()) + return false; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount(); + } + + bool couldTakeSpecialFastCase(int bytecodeOffset) + { + if 
(!numberOfRareCaseProfiles()) + return false; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount(); + } + + bool likelyToTakeDeepestSlowCase(int bytecodeOffset) + { + if (!numberOfRareCaseProfiles()) + return false; + unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned value = slowCaseCount - specialFastCaseCount; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } + + bool likelyToTakeAnySlowCase(int bytecodeOffset) + { + if (!numberOfRareCaseProfiles()) + return false; + unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned value = slowCaseCount + specialFastCaseCount; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } + + unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); } + const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; } + ArrayProfile* addArrayProfile(unsigned bytecodeOffset) + { + m_arrayProfiles.append(ArrayProfile(bytecodeOffset)); + return &m_arrayProfiles.last(); + } + ArrayProfile* getArrayProfile(unsigned bytecodeOffset); + ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset); +#endif - size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; } - void addExceptionHandler(const HandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); } - HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } + // Exception handling support - bool hasExceptionInfo() const { return m_exceptionInfo; } - void clearExceptionInfo() { m_exceptionInfo.clear(); } - ExceptionInfo* extractExceptionInfo() { ASSERT(m_exceptionInfo); return m_exceptionInfo.release(); } + size_t numberOfExceptionHandlers() const { return m_rareData ? 
m_rareData->m_exceptionHandlers.size() : 0; } + void allocateHandlers(const Vector& unlinkedHandlers) + { + size_t count = unlinkedHandlers.size(); + if (!count) + return; + createRareDataIfNecessary(); + m_rareData->m_exceptionHandlers.resize(count); + for (size_t i = 0; i < count; ++i) { + m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start; + m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end; + m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target; + m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth; + } - void addExpressionInfo(const ExpressionRangeInfo& expressionInfo) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_expressionInfo.append(expressionInfo); } - void addGetByIdExceptionInfo(const GetByIdExceptionInfo& info) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_getByIdExceptionInfo.append(info); } + } + HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } - size_t numberOfLineInfos() const { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.size(); } - void addLineInfo(const LineInfo& lineInfo) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_lineInfo.append(lineInfo); } - LineInfo& lastLineInfo() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.last(); } + bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); } #if ENABLE(JIT) - Vector& callReturnIndexVector() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_callReturnIndexVector; } + Vector& callReturnIndexVector() + { + createRareDataIfNecessary(); + return m_rareData->m_callReturnIndexVector; + } #endif - // Constant Pool - - size_t numberOfIdentifiers() const { return m_identifiers.size(); } - void addIdentifier(const Identifier& i) { return m_identifiers.append(i); } - Identifier& identifier(int index) { return m_identifiers[index]; } - - size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); } - void addConstantRegister(const Register& r) { return m_constantRegisters.append(r); } - Register& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; } - ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } - ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].jsValue(); } - - unsigned addFunctionDecl(NonNullPassRefPtr n) { unsigned size = m_functionDecls.size(); m_functionDecls.append(n); return size; } - FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } - int numberOfFunctionDecls() { return m_functionDecls.size(); } - unsigned addFunctionExpr(NonNullPassRefPtr n) { unsigned size = m_functionExprs.size(); m_functionExprs.append(n); return size; } - FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } +#if ENABLE(DFG_JIT) + SegmentedVector& inlineCallFrames() + { + createRareDataIfNecessary(); + return m_rareData->m_inlineCallFrames; + } + + Vector& codeOrigins() + { + createRareDataIfNecessary(); + return m_rareData->m_codeOrigins; + } + + // Having code origins implies that there has been some inlining. 
+ bool hasCodeOrigins() + { + return m_rareData && !!m_rareData->m_codeOrigins.size(); + } + + bool codeOriginForReturn(ReturnAddressPtr, CodeOrigin&); + + bool canGetCodeOrigin(unsigned index) + { + if (!m_rareData) + return false; + return m_rareData->m_codeOrigins.size() > index; + } + + CodeOrigin codeOrigin(unsigned index) + { + RELEASE_ASSERT(m_rareData); + return m_rareData->m_codeOrigins[index].codeOrigin; + } + + bool addFrequentExitSite(const DFG::FrequentExitSite& site) + { + ASSERT(JITCode::isBaselineCode(getJITType())); + return m_exitProfile.add(site); + } + + bool hasExitSite(const DFG::FrequentExitSite& site) const { return m_exitProfile.hasExitSite(site); } - unsigned addRegExp(RegExp* r) { createRareDataIfNecessary(); unsigned size = m_rareData->m_regexps.size(); m_rareData->m_regexps.append(r); return size; } - RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); } + DFG::ExitProfile& exitProfile() { return m_exitProfile; } + + CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles() + { + return m_lazyOperandValueProfiles; + } +#endif + // Constant Pool - // Jump Tables + size_t numberOfIdentifiers() const { return m_identifiers.size(); } + void addIdentifier(const Identifier& i) { return m_identifiers.append(i); } + Identifier& identifier(int index) { return m_identifiers[index]; } - size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; } - SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); } - SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; } + size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); } + unsigned addConstant(JSValue v) + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier()); + m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v); + return result; + } - size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; } - SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); } - SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; } - size_t numberOfStringSwitchJumpTables() const { return m_rareData ? 
m_rareData->m_stringSwitchJumpTables.size() : 0; } - StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } - StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } + unsigned addOrFindConstant(JSValue); + WriteBarrier& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; } + ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } + ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); } + FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } + int numberOfFunctionDecls() { return m_functionDecls.size(); } + FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } - SymbolTable* symbolTable() { return m_symbolTable; } - SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast(m_symbolTable); } + RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); } - EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; } + unsigned numberOfConstantBuffers() const + { + if (!m_rareData) + return 0; + return m_rareData->m_constantBuffers.size(); + } + unsigned addConstantBuffer(const Vector& buffer) + { + createRareDataIfNecessary(); + unsigned size = m_rareData->m_constantBuffers.size(); + m_rareData->m_constantBuffers.append(buffer); + return size; + } - void shrinkToFit(); + Vector& constantBufferAsVector(unsigned index) + { + ASSERT(m_rareData); + return m_rareData->m_constantBuffers[index]; + } + JSValue* constantBuffer(unsigned index) + { + return constantBufferAsVector(index).data(); + } - // FIXME: Make these remaining members private. + JSGlobalObject* globalObject() { return m_globalObject.get(); } + + JSGlobalObject* globalObjectFor(CodeOrigin); - int m_numCalleeRegisters; - int m_numVars; - int m_numParameters; + // Jump Tables - private: -#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING) - void dump(ExecState*, const Vector::const_iterator& begin, Vector::const_iterator&) const; + size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; } + SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); } + SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; } - CString registerName(ExecState*, int r) const; - void printUnaryOp(ExecState*, int location, Vector::const_iterator&, const char* op) const; - void printBinaryOp(ExecState*, int location, Vector::const_iterator&, const char* op) const; - void printConditionalJump(ExecState*, const Vector::const_iterator&, Vector::const_iterator&, int location, const char* op) const; - void printGetByIdOp(ExecState*, int location, Vector::const_iterator&, const char* op) const; - void printPutByIdOp(ExecState*, int location, Vector::const_iterator&, const char* op) const; -#endif + size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? 
m_rareData->m_characterSwitchJumpTables.size() : 0; } + SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); } + SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; } - void reparseForExceptionInfoIfNecessary(CallFrame*); + size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; } + StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } + StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } - void createRareDataIfNecessary() - { - if (!m_rareData) - m_rareData.set(new RareData); - } - ScriptExecutable* m_ownerExecutable; - JSGlobalData* m_globalData; + SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); } - Vector m_instructions; -#ifndef NDEBUG - unsigned m_instructionCount; -#endif + EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; } - int m_thisRegister; + enum ShrinkMode { + // Shrink prior to generating machine code that may point directly into vectors. + EarlyShrink, + + // Shrink after generating machine code, and after possibly creating new vectors + // and appending to others. At this time it is not safe to shrink certain vectors + // because we would have generated machine code that references them directly. + LateShrink + }; + void shrinkToFit(ShrinkMode); + + void copyPostParseDataFrom(CodeBlock* alternative); + void copyPostParseDataFromAlternative(); + + // Functions for controlling when JITting kicks in, in a mixed mode + // execution world. + + bool checkIfJITThresholdReached() + { + return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this); + } + + void dontJITAnytimeSoon() + { + m_llintExecuteCounter.deferIndefinitely(); + } + + void jitAfterWarmUp() + { + m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this); + } + + void jitSoon() + { + m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this); + } + + const ExecutionCounter& llintExecuteCounter() const + { + return m_llintExecuteCounter; + } + + // Functions for controlling when tiered compilation kicks in. This + // controls both when the optimizing compiler is invoked and when OSR + // entry happens. Two triggers exist: the loop trigger and the return + // trigger. In either case, when an addition to m_jitExecuteCounter + // causes it to become non-negative, the optimizing compiler is + // invoked. This includes a fast check to see if this CodeBlock has + // already been optimized (i.e. replacement() returns a CodeBlock + // that was optimized with a higher tier JIT than this one). In the + // case of the loop trigger, if the optimized compilation succeeds + // (or has already succeeded in the past) then OSR is attempted to + // redirect program flow into the optimized code. + + // These functions are called from within the optimization triggers, + // and are used as a single point at which we define the heuristics + // for how much warm-up is mandated before the next optimization + // trigger files. 
All CodeBlocks start out with optimizeAfterWarmUp(), + // as this is called from the CodeBlock constructor. + + // When we observe a lot of speculation failures, we trigger a + // reoptimization. But each time, we increase the optimization trigger + // to avoid thrashing. + unsigned reoptimizationRetryCounter() const; + void countReoptimization(); - bool m_needsFullScopeChain; - bool m_usesEval; - bool m_usesArguments; - bool m_isNumericCompareFunction; + int32_t codeTypeThresholdMultiplier() const; + + int32_t counterValueForOptimizeAfterWarmUp(); + int32_t counterValueForOptimizeAfterLongWarmUp(); + int32_t counterValueForOptimizeSoon(); + + int32_t* addressOfJITExecuteCounter() + { + return &m_jitExecuteCounter.m_counter; + } + + static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); } + static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); } + static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); } - CodeType m_codeType; + const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; } + + unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; } + + // Check if the optimization threshold has been reached, and if not, + // adjust the heuristics accordingly. Returns true if the threshold has + // been reached. + bool checkIfOptimizationThresholdReached(); + + // Call this to force the next optimization trigger to fire. This is + // rarely wise, since optimization triggers are typically more + // expensive than executing baseline code. + void optimizeNextInvocation(); + + // Call this to prevent optimization from happening again. Note that + // optimization will still happen after roughly 2^29 invocations, + // so this is really meant to delay that as much as possible. This + // is called if optimization failed, and we expect it to fail in + // the future as well. + void dontOptimizeAnytimeSoon(); + + // Call this to reinitialize the counter to its starting state, + // forcing a warm-up to happen before the next optimization trigger + // fires. This is called in the CodeBlock constructor. It also + // makes sense to call this if an OSR exit occurred. Note that + // OSR exit code is code generated, so the value of the execute + // counter that this corresponds to is also available directly. + void optimizeAfterWarmUp(); + + // Call this to force an optimization trigger to fire only after + // a lot of warm-up. + void optimizeAfterLongWarmUp(); + + // Call this to cause an optimization trigger to fire soon, but + // not necessarily the next one. This makes sense if optimization + // succeeds. Successfuly optimization means that all calls are + // relinked to the optimized code, so this only affects call + // frames that are still executing this CodeBlock. The value here + // is tuned to strike a balance between the cost of OSR entry + // (which is too high to warrant making every loop back edge to + // trigger OSR immediately) and the cost of executing baseline + // code (which is high enough that we don't necessarily want to + // have a full warm-up). The intuition for calling this instead of + // optimizeNextInvocation() is for the case of recursive functions + // with loops. 
Consider that there may be N call frames of some + // recursive function, for a reasonably large value of N. The top + // one triggers optimization, and then returns, and then all of + // the others return. We don't want optimization to be triggered on + // each return, as that would be superfluous. It only makes sense + // to trigger optimization if one of those functions becomes hot + // in the baseline code. + void optimizeSoon(); + + uint32_t osrExitCounter() const { return m_osrExitCounter; } + + void countOSRExit() { m_osrExitCounter++; } + + uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; } + + static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); } - RefPtr m_source; - unsigned m_sourceOffset; +#if ENABLE(JIT) + uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold); + uint32_t exitCountThresholdForReoptimization(); + uint32_t exitCountThresholdForReoptimizationFromLoop(); + bool shouldReoptimizeNow(); + bool shouldReoptimizeFromLoopNow(); +#endif -#if ENABLE(INTERPRETER) - Vector m_propertyAccessInstructions; - Vector m_globalResolveInstructions; +#if ENABLE(VALUE_PROFILER) + bool shouldOptimizeNow(); + void updateAllValueProfilePredictions(OperationInProgress = NoOperation); + void updateAllArrayPredictions(OperationInProgress = NoOperation); + void updateAllPredictions(OperationInProgress = NoOperation); +#else + bool shouldOptimizeNow() { return false; } + void updateAllValueProfilePredictions(OperationInProgress = NoOperation) { } + void updateAllArrayPredictions(OperationInProgress = NoOperation) { } + void updateAllPredictions(OperationInProgress = NoOperation) { } #endif + #if ENABLE(JIT) - Vector m_structureStubInfos; - Vector m_globalResolveInfos; - Vector m_callLinkInfos; - Vector m_methodCallLinkInfos; - Vector m_linkedCallerList; + void reoptimize(); #endif - Vector m_jumpTargets; +#if ENABLE(VERBOSE_VALUE_PROFILE) + void dumpValueProfiles(); +#endif + + // FIXME: Make these remaining members private. 
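As an illustrative aside (not part of the header), the trigger scheme the comments above describe boils down to a counter that starts below zero and fires once additions push it non-negative. The names here (ToyTierUpCounter, addAndCheckIfThresholdCrossed) are invented; the real ExecutionCounter also rescales its threshold using codeTypeThresholdMultiplier() and the reoptimization retry counter, and tracks a total execution count.

#include <cstdint>

struct ToyTierUpCounter {
    int32_t counter;

    ToyTierUpCounter() : counter(0) { }

    // e.g. optimizeAfterWarmUp(): demand `threshold` more executions before triggering.
    void setNewThreshold(int32_t threshold) { counter = -threshold; }

    // e.g. dontOptimizeAnytimeSoon(): push the trigger roughly 2^29 executions away,
    // matching the "roughly 2^29 invocations" note above.
    void deferIndefinitely() { counter = -(int32_t(1) << 29); }

    // Called from the return and loop back-edge triggers emitted in baseline code.
    // A non-negative result means "invoke the optimizing compiler now" (and, on a
    // loop back edge, attempt OSR entry into the optimized code).
    bool addAndCheckIfThresholdCrossed(int32_t weight)
    {
        counter += weight;
        return counter >= 0;
    }
};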
- // Constant Pool - Vector m_identifiers; - Vector m_constantRegisters; - Vector > m_functionDecls; - Vector > m_functionExprs; + int m_numCalleeRegisters; + int m_numVars; + bool m_isConstructor; - SymbolTable* m_symbolTable; +protected: +#if ENABLE(JIT) + virtual bool jitCompileImpl(ExecState*) = 0; + virtual void jettisonImpl() = 0; +#endif + virtual void visitWeakReferences(SlotVisitor&); + virtual void finalizeUnconditionally(); - OwnPtr m_exceptionInfo; +#if ENABLE(DFG_JIT) + void tallyFrequentExitSites(); +#else + void tallyFrequentExitSites() { } +#endif - struct RareData : FastAllocBase { - Vector m_exceptionHandlers; +private: + friend class DFGCodeBlocks; + + double optimizationThresholdScalingFactor(); - // Rare Constants - Vector > m_regexps; +#if ENABLE(JIT) + ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr); +#endif + +#if ENABLE(VALUE_PROFILER) + void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles); +#endif - // Jump Tables - Vector m_immediateSwitchJumpTables; - Vector m_characterSwitchJumpTables; - Vector m_stringSwitchJumpTables; + void setIdentifiers(const Vector& identifiers) + { + RELEASE_ASSERT(m_identifiers.isEmpty()); + m_identifiers.appendVector(identifiers); + } - EvalCodeCache m_evalCodeCache; + void setConstantRegisters(const Vector >& constants) + { + size_t count = constants.size(); + m_constantRegisters.resize(count); + for (size_t i = 0; i < count; i++) + m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get()); + } -#if ENABLE(JIT) - Vector m_functionRegisterInfos; + void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&); + + CString registerName(ExecState*, int r) const; + void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op); + void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&); + void printGetByIdCacheStatus(PrintStream&, ExecState*, int location); + enum CacheDumpMode { DumpCaches, DontDumpCaches }; + void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode); + void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling); + void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); + void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); +#if ENABLE(VALUE_PROFILER) + void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling); #endif - }; - OwnPtr m_rareData; - }; - // Program code is not marked by any function, so we make the global object - // responsible for marking it. + void visitStructures(SlotVisitor&, Instruction* vPC); + +#if ENABLE(DFG_JIT) + bool shouldImmediatelyAssumeLivenessDuringScan() + { + // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT + // CodeBlocks don't need to be jettisoned when their weak references go + // stale. So if a basline JIT CodeBlock gets scanned, we can assume that + // this means that it's live. 
+        if (!m_dfgData)
+            return true;
+
+        // For simplicity, we don't attempt to jettison code blocks during GC if
+        // they are executing. Instead we strongly mark their weak references to
+        // allow them to continue to execute soundly.
+        if (m_dfgData->mayBeExecuting)
+            return true;
+
+        if (Options::forceDFGCodeBlockLiveness())
+            return true;
-    class GlobalCodeBlock : public CodeBlock {
-    public:
-        GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, JSGlobalObject* globalObject)
-            : CodeBlock(ownerExecutable, codeType, sourceProvider, sourceOffset, &m_unsharedSymbolTable)
-            , m_globalObject(globalObject)
-        {
-            m_globalObject->codeBlocks().add(this);
-        }
+        return false;
+    }
+#else
+    bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
+#endif
+
+    void performTracingFixpointIteration(SlotVisitor&);
+
+    void stronglyVisitStrongReferences(SlotVisitor&);
+    void stronglyVisitWeakReferences(SlotVisitor&);
-        ~GlobalCodeBlock()
+    void createRareDataIfNecessary()
+    {
+        if (!m_rareData)
+            m_rareData = adoptPtr(new RareData);
+    }
+
+#if ENABLE(JIT)
+    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
+    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
+#endif
+    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
+    int m_numParameters;
+    WriteBarrier<ScriptExecutable> m_ownerExecutable;
+    VM* m_vm;
+
+    RefCountedArray<Instruction> m_instructions;
+    int m_thisRegister;
+    int m_argumentsRegister;
+    int m_activationRegister;
+
+    bool m_isStrictMode;
+    bool m_needsActivation;
+
+    RefPtr<SourceProvider> m_source;
+    unsigned m_sourceOffset;
+    unsigned m_firstLineColumnOffset;
+    unsigned m_codeType;
+
+#if ENABLE(LLINT)
+    SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
+    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
+#endif
+#if ENABLE(JIT)
+    Vector<StructureStubInfo> m_structureStubInfos;
+    Vector<ByValInfo> m_byValInfos;
+    Vector<CallLinkInfo> m_callLinkInfos;
+    JITCode m_jitCode;
+    MacroAssemblerCodePtr m_jitCodeWithArityCheck;
+    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
+#endif
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
+    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+#endif
+#if ENABLE(DFG_JIT)
+    struct WeakReferenceTransition {
+        WeakReferenceTransition() { }
+
+        WeakReferenceTransition(VM& vm, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
+            : m_from(vm, owner, from)
+            , m_to(vm, owner, to)
         {
-            if (m_globalObject)
-                m_globalObject->codeBlocks().remove(this);
+            if (!!codeOrigin)
+                m_codeOrigin.set(vm, owner, codeOrigin);
         }
-        void clearGlobalObject() { m_globalObject = 0; }
-
-    private:
-        JSGlobalObject* m_globalObject; // For program and eval nodes, the global object that marks the constant pool.
-        SymbolTable m_unsharedSymbolTable;
+        WriteBarrier<JSCell> m_codeOrigin;
+        WriteBarrier<JSCell> m_from;
+        WriteBarrier<JSCell> m_to;
     };
-
-    class ProgramCodeBlock : public GlobalCodeBlock {
-    public:
-        ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider)
-            : GlobalCodeBlock(ownerExecutable, codeType, sourceProvider, 0, globalObject)
+
+    struct DFGData {
+        DFGData()
+            : mayBeExecuting(false)
+            , isJettisoned(false)
         {
         }
+
+        Vector<DFG::OSREntryData> osrEntry;
+        SegmentedVector<DFG::OSRExit, 8> osrExit;
+        Vector<DFG::SpeculationRecovery> speculationRecovery;
+        SegmentedVector<JumpReplacementWatchpoint> watchpoints;
+        Vector<WeakReferenceTransition> transitions;
+        Vector<WriteBarrier<JSCell> > weakReferences;
+        DFG::VariableEventStream variableEventStream;
+        DFG::MinifiedGraph minifiedDFG;
+        RefPtr<Profiler::Compilation> compilation;
+        bool mayBeExecuting;
+        bool isJettisoned;
+        bool livenessHasBeenProved; // Initialized and used on every GC.
+        bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
+        unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
     };
+
+    OwnPtr<DFGData> m_dfgData;
+
+    // This is relevant to non-DFG code blocks that serve as the profiled code block
+    // for DFG code blocks.
+    DFG::ExitProfile m_exitProfile;
+    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
+#endif
+#if ENABLE(VALUE_PROFILER)
+    Vector<ValueProfile> m_argumentValueProfiles;
+    SegmentedVector<ValueProfile, 8> m_valueProfiles;
+    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
+    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
+    SegmentedVector<ArrayAllocationProfile, 8> m_arrayAllocationProfiles;
+    ArrayProfileVector m_arrayProfiles;
+#endif
+    SegmentedVector<ObjectAllocationProfile, 8> m_objectAllocationProfiles;
+
+    // Constant Pool
+    Vector<Identifier> m_identifiers;
+    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
+    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
+    // it, so we're stuck with it for now.
+    Vector<WriteBarrier<Unknown> > m_constantRegisters;
+    Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
+    Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
+
+    OwnPtr<CodeBlock> m_alternative;
+
+    ExecutionCounter m_llintExecuteCounter;
+
+    ExecutionCounter m_jitExecuteCounter;
+    int32_t m_totalJITExecutions;
+    uint32_t m_osrExitCounter;
+    uint16_t m_optimizationDelayCounter;
+    uint16_t m_reoptimizationRetryCounter;
+
+    Vector<ResolveOperations> m_resolveOperations;
+    Vector<PutToBaseOperation> m_putToBaseOperations;
-    class EvalCodeBlock : public GlobalCodeBlock {
+    struct RareData {
+        WTF_MAKE_FAST_ALLOCATED;
     public:
-        EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth)
-            : GlobalCodeBlock(ownerExecutable, EvalCode, sourceProvider, 0, globalObject)
-            , m_baseScopeDepth(baseScopeDepth)
-        {
-        }
+        Vector<HandlerInfo> m_exceptionHandlers;
-        int baseScopeDepth() const { return m_baseScopeDepth; }
+        // Buffers used for large array literals
+        Vector<Vector<JSValue> > m_constantBuffers;
+
+        // Jump Tables
+        Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
+        Vector<SimpleJumpTable> m_characterSwitchJumpTables;
+        Vector<StringJumpTable> m_stringSwitchJumpTables;
-        const Identifier& variable(unsigned index) { return m_variables[index]; }
-        unsigned numVariables() { return m_variables.size(); }
-        void adoptVariables(Vector<Identifier>& variables)
-        {
-            ASSERT(m_variables.isEmpty());
-            m_variables.swap(variables);
-        }
+        EvalCodeCache m_evalCodeCache;
-    private:
-        int m_baseScopeDepth;
-        Vector<Identifier> m_variables;
+#if ENABLE(JIT)
+        Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
+#endif
+#if ENABLE(DFG_JIT)
+        SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
+        Vector<CodeOrigin> m_codeOrigins;
+#endif
     };
+#if COMPILER(MSVC)
+    friend void WTF::deleteOwnedPtr<RareData>(RareData*);
+#endif
+    OwnPtr<RareData> m_rareData;
+#if ENABLE(JIT)
+    DFG::CapabilityLevel m_canCompileWithDFGState;
+#endif
+};
-    class FunctionCodeBlock : public CodeBlock {
-    public:
-        // Rather than using the usual RefCounted::create idiom for SharedSymbolTable we just use new
-        // as we need to initialise the CodeBlock before we could initialise any RefPtr to hold the shared
-        // symbol table, so we just pass as a raw pointer with a ref count of 1. We then manually deref
-        // in the destructor.
-        FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset)
-            : CodeBlock(ownerExecutable, codeType, sourceProvider, sourceOffset, new SharedSymbolTable)
-        {
-        }
-        ~FunctionCodeBlock()
-        {
-            sharedSymbolTable()->deref();
-        }
-    };
+// Program code is not marked by any function, so we make the global object
+// responsible for marking it.
+
+class GlobalCodeBlock : public CodeBlock {
+protected:
+    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
+        : CodeBlock(CopyParsedBlock, other)
+    {
+    }
+
+    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
+        : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
+    {
+    }
+};
+
+class ProgramCodeBlock : public GlobalCodeBlock {
+public:
+    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
+        : GlobalCodeBlock(CopyParsedBlock, other)
+    {
+    }
-    inline Register& ExecState::r(int index)
+    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
+        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, 0, firstLineColumnOffset, alternative)
     {
-        CodeBlock* codeBlock = this->codeBlock();
-        if (codeBlock->isConstantRegisterIndex(index))
-            return codeBlock->constantRegister(index);
-        return this[index];
     }
+
+#if ENABLE(JIT)
+protected:
+    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
+    virtual void jettisonImpl();
+    virtual bool jitCompileImpl(ExecState*);
+    virtual CodeBlock* replacement();
+    virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+#endif
+};
+
+class EvalCodeBlock : public GlobalCodeBlock {
+public:
+    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
+        : GlobalCodeBlock(CopyParsedBlock, other)
+    {
+    }
+
+    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
+        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, 0, 1, alternative)
+    {
+    }
+
+    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
+    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
+
+#if ENABLE(JIT)
+protected:
+    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
+    virtual void jettisonImpl();
+    virtual bool jitCompileImpl(ExecState*);
+    virtual CodeBlock* replacement();
+    virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+#endif
+
+private:
+    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
+};
+
+class FunctionCodeBlock : public CodeBlock {
+public:
+    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
+        : CodeBlock(CopyParsedBlock, other)
+    {
+    }
+
+    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative = nullptr)
+        : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
+    {
+    }
+
+#if ENABLE(JIT)
+protected:
+    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
+    virtual void jettisonImpl();
+    virtual bool jitCompileImpl(ExecState*);
+    virtual CodeBlock* replacement();
+    virtual DFG::CapabilityLevel canCompileWithDFGInternal();
+#endif
+};
+
+inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+{
+    RELEASE_ASSERT(inlineCallFrame);
+    ExecutableBase* executable = inlineCallFrame->executable.get();
+    RELEASE_ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
+    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
+}
+
+inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+{
+    if (codeOrigin.inlineCallFrame)
+        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
+    return baselineCodeBlock;
+}
+
+inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
+{
+    if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
+        return CallFrame::argumentOffset(argument);
+
+    const SlowArgument* slowArguments = symbolTable()->slowArguments();
+    if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
+        return CallFrame::argumentOffset(argument);
+
+    ASSERT(slowArguments[argument].status == SlowArgument::Captured);
+    return slowArguments[argument].index;
+}
+
+inline Register& ExecState::r(int index)
+{
+    CodeBlock* codeBlock = this->codeBlock();
+    if (codeBlock->isConstantRegisterIndex(index))
+        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
+    return this[index];
+}
+
+inline Register& ExecState::uncheckedR(int index)
+{
+    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
+    return this[index];
+}
+
+#if ENABLE(DFG_JIT)
+inline bool ExecState::isInlineCallFrame()
+{
+    if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
+        return false;
+    return isInlineCallFrameSlow();
+}
+#endif
+
+inline JSValue ExecState::argumentAfterCapture(size_t argument)
+{
+    if (argument >= argumentCount())
+        return jsUndefined();
+
+    if (!codeBlock())
+        return this[argumentOffset(argument)].jsValue();
+
+    return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
+}
+
+#if ENABLE(DFG_JIT)
+inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
+{
+    // We have to check for 0 and -1 because those are used by the HashMap as markers.
+    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
+
+    // This checks for both of those nasty cases in one go.
+    // 0 + 1 = 1
+    // -1 + 1 = 0
+    if (value + 1 <= 1)
+        return;
+
+    HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
+    if (iter == m_set.end())
+        return;
+
+    (*iter)->m_dfgData->mayBeExecuting = true;
+}
+#endif
+
 } // namespace JSC

 #endif // CodeBlock_h
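
A note on the OSR exit machinery declared in the diff above: countOSRExit() bumps m_osrExitCounter each time optimized code bails out, and shouldReoptimizeNow() / shouldReoptimizeFromLoopNow() compare that counter against a threshold that adjustedExitCountThreshold() scales as reoptimization attempts accumulate. The sketch below only illustrates that counter-and-threshold shape; the doubling policy and the names ExitCounterSketch and desiredThreshold's default value are invented for the example and are not the actual JavaScriptCore heuristics.

#include <cstdint>

// Illustrative only: a counter bumped on every OSR exit, plus a predicate
// asking whether exits have become frequent enough to justify throwing the
// optimized code away and recompiling.
struct ExitCounterSketch {
    uint32_t osrExitCounter = 0;
    uint32_t reoptimizationRetryCounter = 0;

    void countOSRExit() { osrExitCounter++; }

    // Hypothetical policy: each earlier reoptimization attempt doubles the
    // number of exits we are willing to tolerate before trying again.
    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold) const
    {
        return desiredThreshold << reoptimizationRetryCounter;
    }

    bool shouldReoptimizeNow(uint32_t desiredThreshold = 100) const
    {
        return osrExitCounter >= adjustedExitCountThreshold(desiredThreshold);
    }
};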
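
The CopyParsedBlockTag constructors in the GlobalCodeBlock, ProgramCodeBlock, EvalCodeBlock, and FunctionCodeBlock classes above are a use of tag dispatch: an empty tag argument exists only to pick a particular constructor overload at the call site. A minimal, self-contained sketch of the same idiom, using hypothetical names (Widget, CopyTag), might look like this:

#include <iostream>

// The tag carries no data; passing it merely selects the
// "copy from an already-constructed object" constructor.
struct Widget {
    enum CopyTag { Copy };

    Widget() : value(0) { }

    // Chosen when the caller writes Widget(Widget::Copy, other).
    Widget(CopyTag, const Widget& other) : value(other.value) { }

    int value;
};

int main()
{
    Widget a;
    a.value = 42;
    Widget b(Widget::Copy, a); // the tag makes the intent explicit at the call site
    std::cout << b.value << "\n"; // prints 42
    return 0;
}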
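
DFGCodeBlocks::mark() above filters out the two sentinel values that WTF hash tables reserve for empty and deleted entries (0 and -1) before treating the argument as a CodeBlock*. The trick is that in unsigned arithmetic 0 + 1 == 1 and (uintptr_t)-1 + 1 wraps to 0, so "value + 1 <= 1" is true exactly for those two sentinels and false for any real pointer. The standalone snippet below, which uses only standard C++ and an invented helper name (isHashTableSentinel), checks that arithmetic:

#include <cassert>
#include <cstdint>

// True for the two sentinel "pointers" 0 and -1, false for anything else.
static bool isHashTableSentinel(const void* p)
{
    uintptr_t value = reinterpret_cast<uintptr_t>(p);
    return value + 1 <= 1; // 0 -> 1, (uintptr_t)-1 -> 0 via unsigned wraparound
}

int main()
{
    int object = 0;
    assert(isHashTableSentinel(nullptr));                               // empty marker
    assert(isHashTableSentinel(reinterpret_cast<void*>(~uintptr_t(0)))); // deleted marker (-1)
    assert(!isHashTableSentinel(&object));                              // ordinary pointer
    return 0;
}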