X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/ba379fdc102753d6be2c4d937058fe40257329fe..ef99ff287df9046eb88937225e0554eabb00e33c:/bytecode/CodeBlock.h

diff --git a/bytecode/CodeBlock.h b/bytecode/CodeBlock.h
index eaf5d1d..18ef0e3 100644
--- a/bytecode/CodeBlock.h
+++ b/bytecode/CodeBlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
  * Copyright (C) 2008 Cameron Zwarich
  *
  * Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
  *    its contributors may be used to endorse or promote products derived
  *    from this software without specific prior written permission.
  *
@@ -30,550 +30,1282 @@
 #ifndef CodeBlock_h
 #define CodeBlock_h

+#include "ArrayProfile.h"
+#include "ByValInfo.h"
+#include "BytecodeConventions.h"
+#include "BytecodeLivenessAnalysis.h"
+#include "CallLinkInfo.h"
+#include "CallReturnOffsetToBytecodeOffset.h"
+#include "CodeBlockHash.h"
+#include "CodeBlockSet.h"
+#include "ConcurrentJITLock.h"
+#include "CodeOrigin.h"
+#include "CodeType.h"
+#include "CompactJITCodeMap.h"
+#include "DFGCommon.h"
+#include "DFGCommonData.h"
+#include "DFGExitProfile.h"
+#include "DeferredCompilationCallback.h"
 #include "EvalCodeCache.h"
+#include "ExecutionCounter.h"
+#include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
+#include "ObjectAllocationProfile.h"
+#include "Options.h"
+#include "PutPropertySlot.h"
 #include "Instruction.h"
 #include "JITCode.h"
+#include "JITWriteBarrier.h"
 #include "JSGlobalObject.h"
 #include "JumpTable.h"
-#include "Nodes.h"
-#include "RegExp.h"
-#include "UString.h"
-#include
+#include "LLIntCallLinkInfo.h"
+#include "LazyOperandValueProfile.h"
+#include "ProfilerCompilation.h"
+#include "ProfilerJettisonReason.h"
+#include "RegExpObject.h"
+#include "StructureStubInfo.h"
+#include "UnconditionalFinalizer.h"
+#include "ValueProfile.h"
+#include "VirtualRegister.h"
+#include "Watchpoint.h"
+#include
+#include
+#include
+#include
 #include
+#include
 #include
+#include

-#if ENABLE(JIT)
-#include "StructureStubInfo.h"
-#endif
+namespace JSC {

-// Register numbers used in bytecode operations have different meaning according to their ranges:
-//  0x80000000-0xFFFFFFFF  Negative indices from the CallFrame pointer are entries in the call frame, see RegisterFile.h.
-//  0x00000000-0x3FFFFFFF  Forward indices from the CallFrame pointer are local vars and temporaries within the function's callframe.
-//  0x40000000-0x7FFFFFFF  Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
-static const int FirstConstantRegisterIndex = 0x40000000;
+class ExecState;
+class LLIntOffsetsExtractor;
+class RepatchBuffer;

-namespace JSC {
+inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }

-    class ExecState;
+static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

-    enum CodeType { GlobalCode, EvalCode, FunctionCode, NativeCode };
+enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };

-    static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
+class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
+    WTF_MAKE_FAST_ALLOCATED;
+    friend class BytecodeLivenessAnalysis;
+    friend class JIT;
+    friend class LLIntOffsetsExtractor;
+public:
+    enum CopyParsedBlockTag { CopyParsedBlock };
+protected:
+    CodeBlock(CopyParsedBlockTag, CodeBlock& other);
+
+    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);

-    struct HandlerInfo {
-        uint32_t start;
-        uint32_t end;
-        uint32_t target;
-        uint32_t scopeDepth;
-#if ENABLE(JIT)
-        CodeLocationLabel nativeCode;
-#endif
-    };
+    WriteBarrier<JSGlobalObject> m_globalObject;
+    Heap* m_heap;

-    struct ExpressionRangeInfo {
-        enum {
-            MaxOffset = (1 << 7) - 1,
-            MaxDivot = (1 << 25) - 1
-        };
-        uint32_t instructionOffset : 25;
-        uint32_t divotPoint : 25;
-        uint32_t startOffset : 7;
-        uint32_t endOffset : 7;
-    };
+public:
+    JS_EXPORT_PRIVATE virtual ~CodeBlock();

-    struct LineInfo {
-        uint32_t instructionOffset;
-        int32_t lineNumber;
-    };
+    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

-    // Both op_construct and op_instanceof require a use of op_get_by_id to get
-    // the prototype property from an object. The exception messages for exceptions
-    // thrown by these instances of op_get_by_id need to reflect this.
-    struct GetByIdExceptionInfo {
-        unsigned bytecodeOffset : 31;
-        bool isOpConstruct : 1;
-    };
+    CString inferredName() const;
+    CodeBlockHash hash() const;
+    bool hasHash() const;
+    bool isSafeToComputeHash() const;
+    CString hashAsStringIfPossible() const;
+    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
+    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
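The removed comment above documents how a raw operand index encodes three address spaces: negative indices reach into the call frame, indices below 0x40000000 name locals and temporaries, and indices from FirstConstantRegisterIndex up name entries in the CodeBlock's constant pool (the same split the new code expresses through VirtualRegister and isConstantRegisterIndex()). A minimal standalone sketch of that partition; the enum and function here are illustrative, not part of CodeBlock.h:

    #include <cstdint>
    #include <cstdio>

    static const int FirstConstantRegisterIndex = 0x40000000;

    enum class OperandKind { CallFrameEntry, Local, Constant };

    // Classify a bytecode operand the way the removed comment describes:
    // negative offsets index into the call frame, indices below
    // 0x40000000 are locals/temporaries, and anything at or above
    // FirstConstantRegisterIndex maps to constant pool slot
    // (index - FirstConstantRegisterIndex) on the CodeBlock.
    static OperandKind classifyOperand(int index)
    {
        if (index < 0)
            return OperandKind::CallFrameEntry;
        if (index < FirstConstantRegisterIndex)
            return OperandKind::Local;
        return OperandKind::Constant;
    }

    int main()
    {
        printf("%d\n", static_cast<int>(classifyOperand(-6)));  // call frame entry
        printf("%d\n", static_cast<int>(classifyOperand(3)));   // local/temporary
        printf("%d\n", static_cast<int>(classifyOperand(FirstConstantRegisterIndex + 2))); // constant pool slot 2
        return 0;
    }

This is also why constantRegister(int index) below subtracts FirstConstantRegisterIndex before indexing m_constantRegisters.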
+ void dumpAssumingJITType(PrintStream&, JITCode::JITType) const; + void dump(PrintStream&) const; -#if ENABLE(JIT) - struct CallLinkInfo { - CallLinkInfo() - : callee(0) - { + int numParameters() const { return m_numParameters; } + void setNumParameters(int newValue); + + int* addressOfNumParameters() { return &m_numParameters; } + static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); } + + CodeBlock* alternative() { return m_alternative.get(); } + PassRefPtr releaseAlternative() { return m_alternative.release(); } + void setAlternative(PassRefPtr alternative) { m_alternative = alternative; } + + template void forEachRelatedCodeBlock(Functor&& functor) + { + Functor f(std::forward(functor)); + Vector codeBlocks; + codeBlocks.append(this); + + while (!codeBlocks.isEmpty()) { + CodeBlock* currentCodeBlock = codeBlocks.takeLast(); + f(currentCodeBlock); + + if (CodeBlock* alternative = currentCodeBlock->alternative()) + codeBlocks.append(alternative); + if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull()) + codeBlocks.append(osrEntryBlock); } + } + + CodeSpecializationKind specializationKind() const + { + return specializationFromIsConstruct(m_isConstructor); + } + + CodeBlock* baselineAlternative(); + + // FIXME: Get rid of this. + // https://bugs.webkit.org/show_bug.cgi?id=123677 + CodeBlock* baselineVersion(); + + void visitAggregate(SlotVisitor&); + + void dumpBytecode(PrintStream& = WTF::dataFile()); + void dumpBytecode( + PrintStream&, unsigned bytecodeOffset, + const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap()); + void printStructures(PrintStream&, const Instruction*); + void printStructure(PrintStream&, const char* name, const Instruction*, int operand); + + bool isStrictMode() const { return m_isStrictMode; } + ECMAMode ecmaMode() const { return isStrictMode() ? 
StrictMode : NotStrictMode; } + + inline bool isKnownNotImmediate(int index) + { + if (index == m_thisRegister.offset() && !m_isStrictMode) + return true; + + if (isConstantRegisterIndex(index)) + return getConstant(index).isCell(); + + return false; + } + + ALWAYS_INLINE bool isTemporaryRegisterIndex(int index) + { + return index >= m_numVars; + } + + HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset); + unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset); + unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset); + void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, + int& startOffset, int& endOffset, unsigned& line, unsigned& column); + + void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result); + void getStubInfoMap(StubInfoMap& result); + + void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result); + void getCallLinkInfoMap(CallLinkInfoMap& result); + +#if ENABLE(JIT) + StructureStubInfo* addStubInfo(); + Bag::iterator stubInfoBegin() { return m_stubInfos.begin(); } + Bag::iterator stubInfoEnd() { return m_stubInfos.end(); } + + void resetStub(StructureStubInfo&); - unsigned bytecodeIndex; - CodeLocationNearCall callReturnLocation; - CodeLocationDataLabelPtr hotPathBegin; - CodeLocationNearCall hotPathOther; - CodeBlock* ownerCodeBlock; - CodeBlock* callee; - unsigned position; + ByValInfo& getByValInfo(unsigned bytecodeIndex) + { + return *(binarySearch(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex)); + } + + CallLinkInfo* addCallLinkInfo(); + Bag::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); } + Bag::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); } + + // This is a slow function call used primarily for compiling OSR exits in the case + // that there had been inlining. Chances are if you want to use this, you're really + // looking for a CallLinkInfoMap to amortize the cost of calling this. 
+ CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex); +#endif // ENABLE(JIT) + + void unlinkIncomingCalls(); + +#if ENABLE(JIT) + void unlinkCalls(); - void setUnlinked() { callee = 0; } - bool isLinked() { return callee; } - }; + void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*); + + bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming) + { + return m_incomingCalls.isOnList(incoming); + } +#endif // ENABLE(JIT) - struct MethodCallLinkInfo { - MethodCallLinkInfo() - : cachedStructure(0) - , cachedPrototypeStructure(0) - { - } + void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*); - CodeLocationCall callReturnLocation; - CodeLocationDataLabelPtr structureLabel; - Structure* cachedStructure; - Structure* cachedPrototypeStructure; - }; + void setJITCodeMap(PassOwnPtr jitCodeMap) + { + m_jitCodeMap = jitCodeMap; + } + CompactJITCodeMap* jitCodeMap() + { + return m_jitCodeMap.get(); + } + + unsigned bytecodeOffset(Instruction* returnAddress) + { + RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end()); + return static_cast(returnAddress) - instructions().begin(); + } - struct FunctionRegisterInfo { - FunctionRegisterInfo(unsigned bytecodeOffset, int functionRegisterIndex) - : bytecodeOffset(bytecodeOffset) - , functionRegisterIndex(functionRegisterIndex) - { - } + bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); } - unsigned bytecodeOffset; - int functionRegisterIndex; - }; + unsigned numberOfInstructions() const { return m_instructions.size(); } + RefCountedArray& instructions() { return m_instructions; } + const RefCountedArray& instructions() const { return m_instructions; } - struct GlobalResolveInfo { - GlobalResolveInfo(unsigned bytecodeOffset) - : structure(0) - , offset(0) - , bytecodeOffset(bytecodeOffset) - { - } + size_t predictedMachineCodeSize(); - Structure* structure; - unsigned offset; - unsigned bytecodeOffset; - }; + bool usesOpcode(OpcodeID); - // This structure is used to map from a call return location - // (given as an offset in bytes into the JIT code) back to - // the bytecode index of the corresponding bytecode operation. - // This is then used to look up the corresponding handler. - struct CallReturnOffsetToBytecodeIndex { - CallReturnOffsetToBytecodeIndex(unsigned callReturnOffset, unsigned bytecodeIndex) - : callReturnOffset(callReturnOffset) - , bytecodeIndex(bytecodeIndex) - { - } + unsigned instructionCount() const { return m_instructions.size(); } - unsigned callReturnOffset; - unsigned bytecodeIndex; - }; + int argumentIndexAfterCapture(size_t argument); + + bool hasSlowArguments(); + const SlowArgument* machineSlowArguments(); - // valueAtPosition helpers for the binaryChop algorithm below. + // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock); + void install(); + + // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind()) + PassRefPtr newReplacement(); + + void setJITCode(PassRefPtr code) + { + ASSERT(m_heap->isDeferred()); + m_heap->reportExtraMemoryCost(code->size()); + ConcurrentJITLocker locker(m_lock); + WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid. 
+ m_jitCode = code; + } + PassRefPtr jitCode() { return m_jitCode; } + JITCode::JITType jitType() const + { + JITCode* jitCode = m_jitCode.get(); + WTF::loadLoadFence(); + JITCode::JITType result = JITCode::jitTypeFor(jitCode); + WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good. + return result; + } - inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo) + bool hasBaselineJITProfiling() const { - return structureStubInfo->callReturnLocation.executableAddress(); + return jitType() == JITCode::BaselineJIT; } + +#if ENABLE(JIT) + virtual CodeBlock* replacement() = 0; + + virtual DFG::CapabilityLevel capabilityLevelInternal() = 0; + DFG::CapabilityLevel capabilityLevel(); + DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; } + + bool hasOptimizedReplacement(JITCode::JITType typeToReplace); + bool hasOptimizedReplacement(); // the typeToReplace is my JITType +#endif - inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo) + void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization); + + ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); } + + void setVM(VM* vm) { m_vm = vm; } + VM* vm() { return m_vm; } + + void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; } + VirtualRegister thisRegister() const { return m_thisRegister; } + + bool usesEval() const { return m_unlinkedCode->usesEval(); } + + void setArgumentsRegister(VirtualRegister argumentsRegister) + { + ASSERT(argumentsRegister.isValid()); + m_argumentsRegister = argumentsRegister; + ASSERT(usesArguments()); + } + VirtualRegister argumentsRegister() const + { + ASSERT(usesArguments()); + return m_argumentsRegister; + } + VirtualRegister uncheckedArgumentsRegister() + { + if (!usesArguments()) + return VirtualRegister(); + return argumentsRegister(); + } + void setActivationRegister(VirtualRegister activationRegister) { - return callLinkInfo->callReturnLocation.executableAddress(); + m_activationRegister = activationRegister; } - inline void* getMethodCallLinkInfoReturnLocation(MethodCallLinkInfo* methodCallLinkInfo) + VirtualRegister activationRegister() const { - return methodCallLinkInfo->callReturnLocation.executableAddress(); + ASSERT(m_activationRegister.isValid()); + return m_activationRegister; } - inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeIndex* pc) + VirtualRegister uncheckedActivationRegister() { - return pc->callReturnOffset; + return m_activationRegister; } - // Binary chop algorithm, calls valueAtPosition on pre-sorted elements in array, - // compares result with key (KeyTypes should be comparable with '--', '<', '>'). - // Optimized for cases where the array contains the key, checked by assertions. - template - inline ArrayType* binaryChop(ArrayType* array, size_t size, KeyType key) + bool usesArguments() const { return m_argumentsRegister.isValid(); } + + bool needsActivation() const + { + ASSERT(m_activationRegister.isValid() == m_needsActivation); + return m_needsActivation; + } + + unsigned captureCount() const + { + if (!symbolTable()) + return 0; + return symbolTable()->captureCount(); + } + + int captureStart() const + { + if (!symbolTable()) + return 0; + return symbolTable()->captureStart(); + } + + int captureEnd() const { - // The array must contain at least one element (pre-condition, array does conatin key). - // If the array only contains one element, no need to do the comparison. 
- while (size > 1) { - // Pick an element to check, half way through the array, and read the value. - int pos = (size - 1) >> 1; - KeyType val = valueAtPosition(&array[pos]); - - // If the key matches, success! - if (val == key) - return &array[pos]; - // The item we are looking for is smaller than the item being check; reduce the value of 'size', - // chopping off the right hand half of the array. - else if (key < val) - size = pos; - // Discard all values in the left hand half of the array, up to and including the item at pos. - else { - size -= (pos + 1); - array += (pos + 1); - } + if (!symbolTable()) + return 0; + return symbolTable()->captureEnd(); + } - // 'size' should never reach zero. - ASSERT(size); - } - - // If we reach this point we've chopped down to one element, no need to check it matches - ASSERT(size == 1); - ASSERT(key == valueAtPosition(&array[0])); - return &array[0]; + bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const; + + int framePointerOffsetToGetActivationRegisters(int machineCaptureStart); + int framePointerOffsetToGetActivationRegisters(); + + CodeType codeType() const { return m_unlinkedCode->codeType(); } + PutPropertySlot::Context putByIdContext() const + { + if (codeType() == EvalCode) + return PutPropertySlot::PutByIdEval; + return PutPropertySlot::PutById; } -#endif - class CodeBlock : public WTF::FastAllocBase { - friend class JIT; - public: - CodeBlock(ScopeNode* ownerNode); - CodeBlock(ScopeNode* ownerNode, CodeType, PassRefPtr, unsigned sourceOffset); - ~CodeBlock(); - - void mark(); - void refStructures(Instruction* vPC) const; - void derefStructures(Instruction* vPC) const; -#if ENABLE(JIT_OPTIMIZE_CALL) - void unlinkCallers(); -#endif + SourceProvider* source() const { return m_source.get(); } + unsigned sourceOffset() const { return m_sourceOffset; } + unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; } + + size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); } + unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); } + + void clearEvalCache(); - static void dumpStatistics(); + String nameForRegister(VirtualRegister); -#if !defined(NDEBUG) || ENABLE_OPCODE_SAMPLING - void dump(ExecState*) const; - void printStructures(const Instruction*) const; - void printStructure(const char* name, const Instruction*, int operand) const; +#if ENABLE(JIT) + void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); } + size_t numberOfByValInfos() const { return m_byValInfos.size(); } + ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; } #endif - inline bool isKnownNotImmediate(int index) - { - if (index == m_thisRegister) - return true; + unsigned numberOfArgumentValueProfiles() + { + ASSERT(m_numParameters >= 0); + ASSERT(m_argumentValueProfiles.size() == static_cast(m_numParameters)); + return m_argumentValueProfiles.size(); + } + ValueProfile* valueProfileForArgument(unsigned argumentIndex) + { + ValueProfile* result = &m_argumentValueProfiles[argumentIndex]; + ASSERT(result->m_bytecodeOffset == -1); + return result; + } + + unsigned numberOfValueProfiles() { return m_valueProfiles.size(); } + ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; } + ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset) + { + ValueProfile* result = binarySearch( + m_valueProfiles, m_valueProfiles.size(), bytecodeOffset, + getValueProfileBytecodeOffset); + ASSERT(result->m_bytecodeOffset != -1); + 
ASSERT(instructions()[bytecodeOffset + opcodeLength( + m_vm->interpreter->getOpcodeID( + instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result); + return result; + } + SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset) + { + return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker); + } - if (isConstantRegisterIndex(index)) - return getConstant(index).isCell(); + unsigned totalNumberOfValueProfiles() + { + return numberOfArgumentValueProfiles() + numberOfValueProfiles(); + } + ValueProfile* getFromAllValueProfiles(unsigned index) + { + if (index < numberOfArgumentValueProfiles()) + return valueProfileForArgument(index); + return valueProfile(index - numberOfArgumentValueProfiles()); + } + RareCaseProfile* addRareCaseProfile(int bytecodeOffset) + { + m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset)); + return &m_rareCaseProfiles.last(); + } + unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); } + RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; } + RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset); + + bool likelyToTakeSlowCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) return false; - } + unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } - ALWAYS_INLINE bool isTemporaryRegisterIndex(int index) - { - return index >= m_numVars; - } + bool couldTakeSlowCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return value >= Options::couldTakeSlowCaseMinimumCount(); + } - HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset); - int lineNumberForBytecodeOffset(CallFrame*, unsigned bytecodeOffset); - int expressionRangeForBytecodeOffset(CallFrame*, unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset); - bool getByIdExceptionInfoForBytecodeOffset(CallFrame*, unsigned bytecodeOffset, OpcodeID&); + RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset) + { + m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset)); + return &m_specialFastCaseProfiles.last(); + } + unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); } + RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; } + RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset) + { + return tryBinarySearch( + m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset, + getRareCaseProfileBytecodeOffset); + } -#if ENABLE(JIT) - void addCaller(CallLinkInfo* caller) - { - caller->callee = this; - caller->position = m_linkedCallerList.size(); - m_linkedCallerList.append(caller); - } + bool likelyToTakeSpecialFastCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount(); + } - void removeCaller(CallLinkInfo* caller) - { - unsigned pos = caller->position; - unsigned lastPos = m_linkedCallerList.size() - 1; - - if (pos != lastPos) { - m_linkedCallerList[pos] = m_linkedCallerList[lastPos]; - m_linkedCallerList[pos]->position = pos; - } - m_linkedCallerList.shrink(lastPos); - } + bool couldTakeSpecialFastCase(int 
bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount(); + } - StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress) - { - return *(binaryChop(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value())); - } + bool likelyToTakeDeepestSlowCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned value = slowCaseCount - specialFastCaseCount; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } - CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress) - { - return *(binaryChop(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value())); - } + bool likelyToTakeAnySlowCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned value = slowCaseCount + specialFastCaseCount; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } - MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress) - { - return *(binaryChop(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value())); - } + unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); } + const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; } + ArrayProfile* addArrayProfile(unsigned bytecodeOffset) + { + m_arrayProfiles.append(ArrayProfile(bytecodeOffset)); + return &m_arrayProfiles.last(); + } + ArrayProfile* getArrayProfile(unsigned bytecodeOffset); + ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset); - unsigned getBytecodeIndex(CallFrame* callFrame, ReturnAddressPtr returnAddress) - { - reparseForExceptionInfoIfNecessary(callFrame); - return binaryChop(m_exceptionInfo->m_callReturnIndexVector.begin(), m_exceptionInfo->m_callReturnIndexVector.size(), ownerNode()->generatedJITCode().offsetOf(returnAddress.value()))->bytecodeIndex; - } - - bool functionRegisterForBytecodeOffset(unsigned bytecodeOffset, int& functionRegisterIndex); -#endif + // Exception handling support - void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; } - bool isNumericCompareFunction() { return m_isNumericCompareFunction; } + size_t numberOfExceptionHandlers() const { return m_rareData ? 
m_rareData->m_exceptionHandlers.size() : 0; } + HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } - Vector& instructions() { return m_instructions; } -#ifndef NDEBUG - void setInstructionCount(unsigned instructionCount) { m_instructionCount = instructionCount; } -#endif + bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); } -#if ENABLE(JIT) - JITCode& getJITCode() { return ownerNode()->generatedJITCode(); } - void setJITCode(JITCode); - ExecutablePool* executablePool() { return ownerNode()->getExecutablePool(); } -#endif +#if ENABLE(DFG_JIT) + Vector& codeOrigins() + { + return m_jitCode->dfgCommon()->codeOrigins; + } + + // Having code origins implies that there has been some inlining. + bool hasCodeOrigins() + { + return JITCode::isOptimizingJIT(jitType()); + } + + bool canGetCodeOrigin(unsigned index) + { + if (!hasCodeOrigins()) + return false; + return index < codeOrigins().size(); + } - ScopeNode* ownerNode() const { return m_ownerNode; } + CodeOrigin codeOrigin(unsigned index) + { + return codeOrigins()[index]; + } - void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; } + bool addFrequentExitSite(const DFG::FrequentExitSite& site) + { + ASSERT(JITCode::isBaselineCode(jitType())); + ConcurrentJITLocker locker(m_lock); + return m_exitProfile.add(locker, site); + } - void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; } - int thisRegister() const { return m_thisRegister; } + bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const + { + return m_exitProfile.hasExitSite(locker, site); + } + bool hasExitSite(const DFG::FrequentExitSite& site) const + { + ConcurrentJITLocker locker(m_lock); + return hasExitSite(locker, site); + } - void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; } - bool needsFullScopeChain() const { return m_needsFullScopeChain; } - void setUsesEval(bool usesEval) { m_usesEval = usesEval; } - bool usesEval() const { return m_usesEval; } - void setUsesArguments(bool usesArguments) { m_usesArguments = usesArguments; } - bool usesArguments() const { return m_usesArguments; } + DFG::ExitProfile& exitProfile() { return m_exitProfile; } - CodeType codeType() const { return m_codeType; } + CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles() + { + return m_lazyOperandValueProfiles; + } +#endif // ENABLE(DFG_JIT) - SourceProvider* source() const { ASSERT(m_codeType != NativeCode); return m_source.get(); } - unsigned sourceOffset() const { ASSERT(m_codeType != NativeCode); return m_sourceOffset; } + // Constant Pool +#if ENABLE(DFG_JIT) + size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); } + size_t numberOfDFGIdentifiers() const + { + if (!JITCode::isOptimizingJIT(jitType())) + return 0; - size_t numberOfJumpTargets() const { return m_jumpTargets.size(); } - void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); } - unsigned jumpTarget(int index) const { return m_jumpTargets[index]; } - unsigned lastJumpTarget() const { return m_jumpTargets.last(); } + return m_jitCode->dfgCommon()->dfgIdentifiers.size(); + } -#if !ENABLE(JIT) - void addPropertyAccessInstruction(unsigned propertyAccessInstruction) { m_propertyAccessInstructions.append(propertyAccessInstruction); } - void addGlobalResolveInstruction(unsigned globalResolveInstruction) { 
m_globalResolveInstructions.append(globalResolveInstruction); } - bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset); + const Identifier& identifier(int index) const + { + size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers(); + if (static_cast(index) < unlinkedIdentifiers) + return m_unlinkedCode->identifier(index); + ASSERT(JITCode::isOptimizingJIT(jitType())); + return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers]; + } #else - size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); } - void addStructureStubInfo(const StructureStubInfo& stubInfo) { m_structureStubInfos.append(stubInfo); } - StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; } + size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); } + const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); } +#endif - void addGlobalResolveInfo(unsigned globalResolveInstruction) { m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction)); } - GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; } - bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset); + Vector>& constants() { return m_constantRegisters; } + size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); } + unsigned addConstant(JSValue v) + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier()); + m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v); + return result; + } - size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); } - void addCallLinkInfo() { m_callLinkInfos.append(CallLinkInfo()); } - CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; } + unsigned addConstantLazily() + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier()); + return result; + } - void addMethodCallLinkInfos(unsigned n) { m_methodCallLinkInfos.grow(n); } - MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; } + bool findConstant(JSValue, unsigned& result); + unsigned addOrFindConstant(JSValue); + WriteBarrier& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; } + ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } + ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); } - void addFunctionRegisterInfo(unsigned bytecodeOffset, int functionIndex) { createRareDataIfNecessary(); m_rareData->m_functionRegisterInfos.append(FunctionRegisterInfo(bytecodeOffset, functionIndex)); } -#endif + FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } + int numberOfFunctionDecls() { return m_functionDecls.size(); } + FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } - // Exception handling support + RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); } - size_t numberOfExceptionHandlers() const { return m_rareData ? 
m_rareData->m_exceptionHandlers.size() : 0; } - void addExceptionHandler(const HandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); } - HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } + unsigned numberOfConstantBuffers() const + { + if (!m_rareData) + return 0; + return m_rareData->m_constantBuffers.size(); + } + unsigned addConstantBuffer(const Vector& buffer) + { + createRareDataIfNecessary(); + unsigned size = m_rareData->m_constantBuffers.size(); + m_rareData->m_constantBuffers.append(buffer); + return size; + } - bool hasExceptionInfo() const { return m_exceptionInfo; } - void clearExceptionInfo() { m_exceptionInfo.clear(); } + Vector& constantBufferAsVector(unsigned index) + { + ASSERT(m_rareData); + return m_rareData->m_constantBuffers[index]; + } + JSValue* constantBuffer(unsigned index) + { + return constantBufferAsVector(index).data(); + } - void addExpressionInfo(const ExpressionRangeInfo& expressionInfo) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_expressionInfo.append(expressionInfo); } - void addGetByIdExceptionInfo(const GetByIdExceptionInfo& info) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_getByIdExceptionInfo.append(info); } + Heap* heap() const { return m_heap; } + JSGlobalObject* globalObject() { return m_globalObject.get(); } - size_t numberOfLineInfos() const { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.size(); } - void addLineInfo(const LineInfo& lineInfo) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_lineInfo.append(lineInfo); } - LineInfo& lastLineInfo() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.last(); } + JSGlobalObject* globalObjectFor(CodeOrigin); -#if ENABLE(JIT) - Vector& callReturnIndexVector() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_callReturnIndexVector; } -#endif + BytecodeLivenessAnalysis& livenessAnalysis() + { + { + ConcurrentJITLocker locker(m_lock); + if (!!m_livenessAnalysis) + return *m_livenessAnalysis; + } + std::unique_ptr analysis = + std::make_unique(this); + { + ConcurrentJITLocker locker(m_lock); + if (!m_livenessAnalysis) + m_livenessAnalysis = WTF::move(analysis); + return *m_livenessAnalysis; + } + } + + void validate(); - // Constant Pool + // Jump Tables - size_t numberOfIdentifiers() const { return m_identifiers.size(); } - void addIdentifier(const Identifier& i) { return m_identifiers.append(i); } - Identifier& identifier(int index) { return m_identifiers[index]; } + size_t numberOfSwitchJumpTables() const { return m_rareData ? 
m_rareData->m_switchJumpTables.size() : 0; } + SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); } + SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; } + void clearSwitchJumpTables() + { + if (!m_rareData) + return; + m_rareData->m_switchJumpTables.clear(); + } - size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); } - void addConstantRegister(const Register& r) { return m_constantRegisters.append(r); } - Register& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; } - ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; } - ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].jsValue(); } + size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; } + StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } + StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } - unsigned addFunctionExpression(FuncExprNode* n) { unsigned size = m_functionExpressions.size(); m_functionExpressions.append(n); return size; } - FuncExprNode* functionExpression(int index) const { return m_functionExpressions[index].get(); } - unsigned addFunction(FuncDeclNode* n) { createRareDataIfNecessary(); unsigned size = m_rareData->m_functions.size(); m_rareData->m_functions.append(n); return size; } - FuncDeclNode* function(int index) const { ASSERT(m_rareData); return m_rareData->m_functions[index].get(); } + SymbolTable* symbolTable() const { return m_symbolTable.get(); } - bool hasFunctions() const { return m_functionExpressions.size() || (m_rareData && m_rareData->m_functions.size()); } + EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; } - unsigned addRegExp(RegExp* r) { createRareDataIfNecessary(); unsigned size = m_rareData->m_regexps.size(); m_rareData->m_regexps.append(r); return size; } - RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); } + enum ShrinkMode { + // Shrink prior to generating machine code that may point directly into vectors. + EarlyShrink, + // Shrink after generating machine code, and after possibly creating new vectors + // and appending to others. At this time it is not safe to shrink certain vectors + // because we would have generated machine code that references them directly. + LateShrink + }; + void shrinkToFit(ShrinkMode); - // Jump Tables + // Functions for controlling when JITting kicks in, in a mixed mode + // execution world. - size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? 
m_rareData->m_immediateSwitchJumpTables.size() : 0; } - SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); } - SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; } + bool checkIfJITThresholdReached() + { + return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this); + } - size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; } - SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); } - SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; } + void dontJITAnytimeSoon() + { + m_llintExecuteCounter.deferIndefinitely(); + } - size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; } - StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } - StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } + void jitAfterWarmUp() + { + m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this); + } + void jitSoon() + { + m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this); + } - SymbolTable& symbolTable() { return m_symbolTable; } + const BaselineExecutionCounter& llintExecuteCounter() const + { + return m_llintExecuteCounter; + } - EvalCodeCache& evalCodeCache() { ASSERT(m_codeType != NativeCode); createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; } + // Functions for controlling when tiered compilation kicks in. This + // controls both when the optimizing compiler is invoked and when OSR + // entry happens. Two triggers exist: the loop trigger and the return + // trigger. In either case, when an addition to m_jitExecuteCounter + // causes it to become non-negative, the optimizing compiler is + // invoked. This includes a fast check to see if this CodeBlock has + // already been optimized (i.e. replacement() returns a CodeBlock + // that was optimized with a higher tier JIT than this one). In the + // case of the loop trigger, if the optimized compilation succeeds + // (or has already succeeded in the past) then OSR is attempted to + // redirect program flow into the optimized code. + + // These functions are called from within the optimization triggers, + // and are used as a single point at which we define the heuristics + // for how much warm-up is mandated before the next optimization + // trigger files. All CodeBlocks start out with optimizeAfterWarmUp(), + // as this is called from the CodeBlock constructor. + + // When we observe a lot of speculation failures, we trigger a + // reoptimization. But each time, we increase the optimization trigger + // to avoid thrashing. + JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const; + void countReoptimization(); +#if ENABLE(JIT) + unsigned numberOfDFGCompiles(); - void shrinkToFit(); + int32_t codeTypeThresholdMultiplier() const; - // FIXME: Make these remaining members private. 
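checkIfJITThresholdReached(), jitAfterWarmUp(), and jitSoon() above all funnel into an execution counter that counts up toward a crossing point. A toy model of just that count-and-compare core, under stated assumptions: the real BaselineExecutionCounter also rescales thresholds heuristically (see adjustedCounterValue() and codeTypeThresholdMultiplier() below), none of which is modeled here, and ToyExecutionCounter is an illustrative name:

    #include <cstdint>
    #include <cstdio>

    // Simplified model of the tier-up counters: execution events add to
    // a counter that starts negative; reaching zero signals that the
    // next tier should be invoked.
    class ToyExecutionCounter {
    public:
        void setNewThreshold(int32_t threshold) { m_counter = -threshold; }
        void deferIndefinitely() { m_counter = INT32_MIN; }

        // Called on each execution event (function entry or loop
        // back-edge). Returns true once the threshold is crossed.
        bool checkIfThresholdCrossedAndSet()
        {
            if (m_counter >= 0)
                return true;
            ++m_counter;
            return m_counter >= 0;
        }

    private:
        int32_t m_counter { INT32_MIN };
    };

    int main()
    {
        ToyExecutionCounter counter;
        counter.setNewThreshold(3); // analogous to jitSoon()/jitAfterWarmUp()
        int executions = 0;
        while (!counter.checkIfThresholdCrossedAndSet())
            ++executions;
        printf("tier-up signaled after %d prior executions\n", executions);
        return 0;
    }

Storing the counter as a negative number counting toward zero is what lets the JIT test it with a single compare against zero, which is why addressOfJITExecuteCounter() exposes the raw field.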
+ int32_t adjustedCounterValue(int32_t desiredThreshold); - int m_numCalleeRegisters; - int m_numVars; - int m_numParameters; + int32_t* addressOfJITExecuteCounter() + { + return &m_jitExecuteCounter.m_counter; + } - private: -#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING) - void dump(ExecState*, const Vector::const_iterator& begin, Vector::const_iterator&) const; -#endif + static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); } + static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); } + static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); } + + const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; } + + unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; } + + // Check if the optimization threshold has been reached, and if not, + // adjust the heuristics accordingly. Returns true if the threshold has + // been reached. + bool checkIfOptimizationThresholdReached(); + + // Call this to force the next optimization trigger to fire. This is + // rarely wise, since optimization triggers are typically more + // expensive than executing baseline code. + void optimizeNextInvocation(); + + // Call this to prevent optimization from happening again. Note that + // optimization will still happen after roughly 2^29 invocations, + // so this is really meant to delay that as much as possible. This + // is called if optimization failed, and we expect it to fail in + // the future as well. + void dontOptimizeAnytimeSoon(); + + // Call this to reinitialize the counter to its starting state, + // forcing a warm-up to happen before the next optimization trigger + // fires. This is called in the CodeBlock constructor. It also + // makes sense to call this if an OSR exit occurred. Note that + // OSR exit code is code generated, so the value of the execute + // counter that this corresponds to is also available directly. + void optimizeAfterWarmUp(); + + // Call this to force an optimization trigger to fire only after + // a lot of warm-up. + void optimizeAfterLongWarmUp(); + + // Call this to cause an optimization trigger to fire soon, but + // not necessarily the next one. This makes sense if optimization + // succeeds. Successfuly optimization means that all calls are + // relinked to the optimized code, so this only affects call + // frames that are still executing this CodeBlock. The value here + // is tuned to strike a balance between the cost of OSR entry + // (which is too high to warrant making every loop back edge to + // trigger OSR immediately) and the cost of executing baseline + // code (which is high enough that we don't necessarily want to + // have a full warm-up). The intuition for calling this instead of + // optimizeNextInvocation() is for the case of recursive functions + // with loops. Consider that there may be N call frames of some + // recursive function, for a reasonably large value of N. The top + // one triggers optimization, and then returns, and then all of + // the others return. We don't want optimization to be triggered on + // each return, as that would be superfluous. It only makes sense + // to trigger optimization if one of those functions becomes hot + // in the baseline code. 
+ void optimizeSoon(); + + void forceOptimizationSlowPathConcurrently(); + + void setOptimizationThresholdBasedOnCompilationResult(CompilationResult); + + uint32_t osrExitCounter() const { return m_osrExitCounter; } - void reparseForExceptionInfoIfNecessary(CallFrame*); + void countOSRExit() { m_osrExitCounter++; } - void createRareDataIfNecessary() - { - ASSERT(m_codeType != NativeCode); - if (!m_rareData) - m_rareData.set(new RareData); - } + uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; } - ScopeNode* m_ownerNode; - JSGlobalData* m_globalData; + static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); } - Vector m_instructions; -#ifndef NDEBUG - unsigned m_instructionCount; + uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold); + uint32_t exitCountThresholdForReoptimization(); + uint32_t exitCountThresholdForReoptimizationFromLoop(); + bool shouldReoptimizeNow(); + bool shouldReoptimizeFromLoopNow(); +#else // No JIT + void optimizeAfterWarmUp() { } + unsigned numberOfDFGCompiles() { return 0; } #endif - int m_thisRegister; + bool shouldOptimizeNow(); + void updateAllValueProfilePredictions(); + void updateAllArrayPredictions(); + void updateAllPredictions(); - bool m_needsFullScopeChain; - bool m_usesEval; - bool m_usesArguments; - bool m_isNumericCompareFunction; + unsigned frameRegisterCount(); + int stackPointerOffset(); - CodeType m_codeType; + bool hasOpDebugForLineAndColumn(unsigned line, unsigned column); - RefPtr m_source; - unsigned m_sourceOffset; + bool hasDebuggerRequests() const { return m_debuggerRequests; } + void* debuggerRequestsAddress() { return &m_debuggerRequests; } -#if !ENABLE(JIT) - Vector m_propertyAccessInstructions; - Vector m_globalResolveInstructions; -#else - Vector m_structureStubInfos; - Vector m_globalResolveInfos; - Vector m_callLinkInfos; - Vector m_methodCallLinkInfos; - Vector m_linkedCallerList; -#endif + void addBreakpoint(unsigned numBreakpoints); + void removeBreakpoint(unsigned numBreakpoints) + { + ASSERT(m_numBreakpoints >= numBreakpoints); + m_numBreakpoints -= numBreakpoints; + } - Vector m_jumpTargets; + enum SteppingMode { + SteppingModeDisabled, + SteppingModeEnabled + }; + void setSteppingMode(SteppingMode); - // Constant Pool - Vector m_identifiers; - Vector m_constantRegisters; - Vector > m_functionExpressions; + void clearDebuggerRequests() + { + m_steppingMode = SteppingModeDisabled; + m_numBreakpoints = 0; + } + + // FIXME: Make these remaining members private. - SymbolTable m_symbolTable; + int m_numCalleeRegisters; + int m_numVars; + bool m_isConstructor : 1; + + // This is intentionally public; it's the responsibility of anyone doing any + // of the following to hold the lock: + // + // - Modifying any inline cache in this code block. + // + // - Quering any inline cache in this code block, from a thread other than + // the main thread. + // + // Additionally, it's only legal to modify the inline cache on the main + // thread. This means that the main thread can query the inline cache without + // locking. This is crucial since executing the inline cache is effectively + // "querying" it. + // + // Another exception to the rules is that the GC can do whatever it wants + // without holding any locks, because the GC is guaranteed to wait until any + // concurrent compilation threads finish what they're doing. + mutable ConcurrentJITLock m_lock; + + bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it. 
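The comment on m_lock above fixes a three-part protocol: only the main thread may modify an inline cache, and it must hold the lock to do so; any other thread must hold the lock even to query; the main thread alone may query without locking. A sketch of that discipline, with std::mutex standing in for ConcurrentJITLock and a plain int standing in for inline-cache state (ToyCodeBlock and these function names are illustrative only):

    #include <cstdio>
    #include <mutex>
    #include <thread>

    struct ToyCodeBlock {
        mutable std::mutex lock; // plays the role of m_lock
        int inlineCacheState { 0 };
    };

    // Main thread: the only thread allowed to modify, and it locks so
    // that concurrent readers always observe a consistent value.
    void mainThreadPatchCache(ToyCodeBlock& block, int newState)
    {
        std::lock_guard<std::mutex> locker(block.lock);
        block.inlineCacheState = newState;
    }

    // Main thread may read without locking: no one else ever writes.
    int mainThreadReadCache(const ToyCodeBlock& block)
    {
        return block.inlineCacheState;
    }

    // A concurrent compiler thread must lock to read, because the main
    // thread may be mid-update.
    int compilerThreadReadCache(const ToyCodeBlock& block)
    {
        std::lock_guard<std::mutex> locker(block.lock);
        return block.inlineCacheState;
    }

    int main()
    {
        ToyCodeBlock block;
        mainThreadPatchCache(block, 42);
        std::thread compiler([&] { printf("compiler sees %d\n", compilerThreadReadCache(block)); });
        compiler.join();
        printf("main sees %d\n", mainThreadReadCache(block));
        return 0;
    }

The asymmetry is the point: executing an inline cache on the main thread counts as "querying" it, so requiring a lock there would put a lock acquisition on every cached property access.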
+ bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC. + + bool m_didFailFTLCompilation : 1; + bool m_hasBeenCompiledWithFTL : 1; - struct ExceptionInfo { - Vector m_expressionInfo; - Vector m_lineInfo; - Vector m_getByIdExceptionInfo; + // Internal methods for use by validation code. It would be private if it wasn't + // for the fact that we use it from anonymous namespaces. + void beginValidationDidFail(); + NO_RETURN_DUE_TO_CRASH void endValidationDidFail(); -#if ENABLE(JIT) - Vector m_callReturnIndexVector; + bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live. + +protected: + virtual void visitWeakReferences(SlotVisitor&) override; + virtual void finalizeUnconditionally() override; + +#if ENABLE(DFG_JIT) + void tallyFrequentExitSites(); +#else + void tallyFrequentExitSites() { } #endif - }; - OwnPtr m_exceptionInfo; - struct RareData { - Vector m_exceptionHandlers; +private: + friend class CodeBlockSet; + + CodeBlock* specialOSREntryBlockOrNull(); + + void noticeIncomingCall(ExecState* callerFrame); + + double optimizationThresholdScalingFactor(); - // Rare Constants - Vector > m_functions; - Vector > m_regexps; +#if ENABLE(JIT) + ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr); +#endif + + void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles); - // Jump Tables - Vector m_immediateSwitchJumpTables; - Vector m_characterSwitchJumpTables; - Vector m_stringSwitchJumpTables; + void setConstantRegisters(const Vector>& constants) + { + size_t count = constants.size(); + m_constantRegisters.resize(count); + for (size_t i = 0; i < count; i++) + m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get()); + } - EvalCodeCache m_evalCodeCache; + void dumpBytecode( + PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, + const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap()); + + CString registerName(int r) const; + void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op); + void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&); + void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&); + enum CacheDumpMode { DumpCaches, DontDumpCaches }; + void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&); + void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand); + + void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling); + void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); + void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); + void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling); + + bool 
shouldImmediatelyAssumeLivenessDuringScan(); + + void propagateTransitions(SlotVisitor&); + void determineLiveness(SlotVisitor&); + + void stronglyVisitStrongReferences(SlotVisitor&); + void stronglyVisitWeakReferences(SlotVisitor&); + void createRareDataIfNecessary() + { + if (!m_rareData) + m_rareData = adoptPtr(new RareData); + } + #if ENABLE(JIT) - Vector m_functionRegisterInfos; + void resetStubInternal(RepatchBuffer&, StructureStubInfo&); + void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&); #endif + WriteBarrier m_unlinkedCode; + int m_numParameters; + union { + unsigned m_debuggerRequests; + struct { + unsigned m_hasDebuggerStatement : 1; + unsigned m_steppingMode : 1; + unsigned m_numBreakpoints : 30; }; - OwnPtr m_rareData; }; + WriteBarrier m_ownerExecutable; + VM* m_vm; + + RefCountedArray m_instructions; + WriteBarrier m_symbolTable; + VirtualRegister m_thisRegister; + VirtualRegister m_argumentsRegister; + VirtualRegister m_activationRegister; + + bool m_isStrictMode; + bool m_needsActivation; + bool m_mayBeExecuting; + uint8_t m_visitAggregateHasBeenCalled; + + RefPtr m_source; + unsigned m_sourceOffset; + unsigned m_firstLineColumnOffset; + unsigned m_codeType; + + Vector m_llintCallLinkInfos; + SentinelLinkedList> m_incomingLLIntCalls; + RefPtr m_jitCode; +#if ENABLE(JIT) + Bag m_stubInfos; + Vector m_byValInfos; + Bag m_callLinkInfos; + SentinelLinkedList> m_incomingCalls; +#endif + OwnPtr m_jitCodeMap; +#if ENABLE(DFG_JIT) + // This is relevant to non-DFG code blocks that serve as the profiled code block + // for DFG code blocks. + DFG::ExitProfile m_exitProfile; + CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles; +#endif + Vector m_argumentValueProfiles; + Vector m_valueProfiles; + SegmentedVector m_rareCaseProfiles; + SegmentedVector m_specialFastCaseProfiles; + Vector m_arrayAllocationProfiles; + ArrayProfileVector m_arrayProfiles; + Vector m_objectAllocationProfiles; + + // Constant Pool + COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier), Register_must_be_same_size_as_WriteBarrier_Unknown); + // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates + // it, so we're stuck with it for now. + Vector> m_constantRegisters; + Vector> m_functionDecls; + Vector> m_functionExprs; + + RefPtr m_alternative; + + BaselineExecutionCounter m_llintExecuteCounter; - // Program code is not marked by any function, so we make the global object - // responsible for marking it. 
+ BaselineExecutionCounter m_jitExecuteCounter; + int32_t m_totalJITExecutions; + uint32_t m_osrExitCounter; + uint16_t m_optimizationDelayCounter; + uint16_t m_reoptimizationRetryCounter; + + mutable CodeBlockHash m_hash; - class ProgramCodeBlock : public CodeBlock { + std::unique_ptr m_livenessAnalysis; + + struct RareData { + WTF_MAKE_FAST_ALLOCATED; public: - ProgramCodeBlock(ScopeNode* ownerNode, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr sourceProvider) - : CodeBlock(ownerNode, codeType, sourceProvider, 0) - , m_globalObject(globalObject) - { - m_globalObject->codeBlocks().add(this); - } + Vector m_exceptionHandlers; - ~ProgramCodeBlock() - { - if (m_globalObject) - m_globalObject->codeBlocks().remove(this); - } + // Buffers used for large array literals + Vector> m_constantBuffers; - void clearGlobalObject() { m_globalObject = 0; } + // Jump Tables + Vector m_switchJumpTables; + Vector m_stringSwitchJumpTables; - private: - JSGlobalObject* m_globalObject; // For program and eval nodes, the global object that marks the constant pool. + EvalCodeCache m_evalCodeCache; }; +#if COMPILER(MSVC) + friend void WTF::deleteOwnedPtr(RareData*); +#endif + OwnPtr m_rareData; +#if ENABLE(JIT) + DFG::CapabilityLevel m_capabilityLevelState; +#endif +}; - class EvalCodeBlock : public ProgramCodeBlock { - public: - EvalCodeBlock(ScopeNode* ownerNode, JSGlobalObject* globalObject, PassRefPtr sourceProvider, int baseScopeDepth) - : ProgramCodeBlock(ownerNode, EvalCode, globalObject, sourceProvider) - , m_baseScopeDepth(baseScopeDepth) - { - } +// Program code is not marked by any function, so we make the global object +// responsible for marking it. + +class GlobalCodeBlock : public CodeBlock { +protected: + GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other) + : CodeBlock(CopyParsedBlock, other) + { + } + + GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) + : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset) + { + } +}; - int baseScopeDepth() const { return m_baseScopeDepth; } +class ProgramCodeBlock : public GlobalCodeBlock { +public: + ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other) + : GlobalCodeBlock(CopyParsedBlock, other) + { + } - private: - int m_baseScopeDepth; - }; + ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr sourceProvider, unsigned firstLineColumnOffset) + : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset) + { + } + +#if ENABLE(JIT) +protected: + virtual CodeBlock* replacement() override; + virtual DFG::CapabilityLevel capabilityLevelInternal() override; +#endif +}; + +class EvalCodeBlock : public GlobalCodeBlock { +public: + EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other) + : GlobalCodeBlock(CopyParsedBlock, other) + { + } + + EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr sourceProvider) + : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1) + { + } + + const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); } + unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); } + +#if ENABLE(JIT) +protected: + virtual CodeBlock* replacement() override; + virtual 
DFG::CapabilityLevel capabilityLevelInternal() override; +#endif + +private: + UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast(unlinkedCodeBlock()); } +}; + +class FunctionCodeBlock : public CodeBlock { +public: + FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other) + : CodeBlock(CopyParsedBlock, other) + { + } - inline Register& ExecState::r(int index) + FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) + : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset) { - CodeBlock* codeBlock = this->codeBlock(); - if (codeBlock->isConstantRegisterIndex(index)) - return codeBlock->constantRegister(index); - return this[index]; } + +#if ENABLE(JIT) +protected: + virtual CodeBlock* replacement() override; + virtual DFG::CapabilityLevel capabilityLevelInternal() override; +#endif +}; + +inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame) +{ + RELEASE_ASSERT(inlineCallFrame); + ExecutableBase* executable = inlineCallFrame->executable.get(); + RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info()); + return static_cast(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct); +} + +inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock) +{ + if (codeOrigin.inlineCallFrame) + return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame); + return baselineCodeBlock; +} + +inline int CodeBlock::argumentIndexAfterCapture(size_t argument) +{ + if (argument >= static_cast(symbolTable()->parameterCount())) + return CallFrame::argumentOffset(argument); + + const SlowArgument* slowArguments = symbolTable()->slowArguments(); + if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal) + return CallFrame::argumentOffset(argument); + + ASSERT(slowArguments[argument].status == SlowArgument::Captured); + return slowArguments[argument].index; +} + +inline bool CodeBlock::hasSlowArguments() +{ + return !!symbolTable()->slowArguments(); +} + +inline Register& ExecState::r(int index) +{ + CodeBlock* codeBlock = this->codeBlock(); + if (codeBlock->isConstantRegisterIndex(index)) + return *reinterpret_cast(&codeBlock->constantRegister(index)); + return this[index]; +} + +inline Register& ExecState::uncheckedR(int index) +{ + RELEASE_ASSERT(index < FirstConstantRegisterIndex); + return this[index]; +} + +inline JSValue ExecState::argumentAfterCapture(size_t argument) +{ + if (argument >= argumentCount()) + return jsUndefined(); + + if (!codeBlock()) + return this[argumentOffset(argument)].jsValue(); + + return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue(); +} + +inline void CodeBlockSet::mark(void* candidateCodeBlock) +{ + // We have to check for 0 and -1 because those are used by the HashMap as markers. + uintptr_t value = reinterpret_cast(candidateCodeBlock); + + // This checks for both of those nasty cases in one go. 
+    //  0 + 1 = 1
+    // -1 + 1 = 0
+    if (value + 1 <= 1)
+        return;
+
+    CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
+    if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
+        return;
+
+    mark(codeBlock);
+}
+
+inline void CodeBlockSet::mark(CodeBlock* codeBlock)
+{
+    if (!codeBlock)
+        return;
+
+    if (codeBlock->m_mayBeExecuting)
+        return;
+
+    codeBlock->m_mayBeExecuting = true;
+    // We might not have cleared the marks for this CodeBlock, but we need to visit it.
+    codeBlock->m_visitAggregateHasBeenCalled = false;
+#if ENABLE(GGC)
+    m_currentlyExecuting.append(codeBlock);
+#endif
+}
+
+template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
+{
+    switch (type()) {
+    case ProgramExecutableType: {
+        if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
+            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+        break;
+    }
+
+    case EvalExecutableType: {
+        if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
+            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+        break;
+    }
+
+    case FunctionExecutableType: {
+        Functor f(std::forward<Functor>(functor));
+        FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
+        if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
+            codeBlock->forEachRelatedCodeBlock(f);
+        if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
+            codeBlock->forEachRelatedCodeBlock(f);
+        break;
+    }
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+}

 } // namespace JSC
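CodeBlockSet::mark() above screens out the two HashMap sentinel keys (the null key 0 and the deleted key -1) with a single unsigned comparison. A standalone demonstration of why value + 1 <= 1 is exactly that test; isHashTableSentinel is an illustrative name, not WTF API:

    #include <cstdint>
    #include <cstdio>

    // In unsigned arithmetic, 0 + 1 wraps to 1 and (uintptr_t)-1 + 1
    // wraps to 0, so value + 1 <= 1 holds for precisely those two
    // values and for no other pointer bit pattern.
    static bool isHashTableSentinel(const void* candidate)
    {
        uintptr_t value = reinterpret_cast<uintptr_t>(candidate);
        return value + 1 <= 1;
    }

    int main()
    {
        int object = 0;
        printf("%d\n", isHashTableSentinel(nullptr));                                // 1: null key
        printf("%d\n", isHashTableSentinel(reinterpret_cast<void*>(uintptr_t(-1)))); // 1: deleted key
        printf("%d\n", isHashTableSentinel(&object));                                // 0: real pointer
        return 0;
    }

Folding both sentinel checks into one branch keeps the conservative-scan fast path cheap, since mark() runs for every candidate pointer found on the stack.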