diff --git a/bytecode/CodeBlock.h b/bytecode/CodeBlock.h
index 778376f94c62f92eb06a8b523bb301e2ecec9e60..18ef0e3c22abeda24df3a6afcb325911cbf3122e 100644
--- a/bytecode/CodeBlock.h
+++ b/bytecode/CodeBlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
 #ifndef CodeBlock_h
 #define CodeBlock_h
 
+#include "ArrayProfile.h"
+#include "ByValInfo.h"
 #include "BytecodeConventions.h"
+#include "BytecodeLivenessAnalysis.h"
 #include "CallLinkInfo.h"
 #include "CallReturnOffsetToBytecodeOffset.h"
+#include "CodeBlockHash.h"
+#include "CodeBlockSet.h"
+#include "ConcurrentJITLock.h"
 #include "CodeOrigin.h"
 #include "CodeType.h"
 #include "CompactJITCodeMap.h"
-#include "DFGCodeBlocks.h"
+#include "DFGCommon.h"
+#include "DFGCommonData.h"
 #include "DFGExitProfile.h"
-#include "DFGOSREntry.h"
-#include "DFGOSRExit.h"
+#include "DeferredCompilationCallback.h"
 #include "EvalCodeCache.h"
 #include "ExecutionCounter.h"
 #include "ExpressionRangeInfo.h"
-#include "GlobalResolveInfo.h"
 #include "HandlerInfo.h"
-#include "MethodCallLinkInfo.h"
+#include "ObjectAllocationProfile.h"
 #include "Options.h"
+#include "PutPropertySlot.h"
 #include "Instruction.h"
 #include "JITCode.h"
 #include "JITWriteBarrier.h"
 #include "JumpTable.h"
 #include "LLIntCallLinkInfo.h"
 #include "LazyOperandValueProfile.h"
-#include "LineInfo.h"
-#include "Nodes.h"
+#include "ProfilerCompilation.h"
+#include "ProfilerJettisonReason.h"
 #include "RegExpObject.h"
 #include "StructureStubInfo.h"
-#include "UString.h"
 #include "UnconditionalFinalizer.h"
 #include "ValueProfile.h"
-#include <wtf/RefCountedArray.h>
-#include <wtf/FastAllocBase.h>
+#include "VirtualRegister.h"
+#include "Watchpoint.h"
+#include <wtf/Bag.h>
+#include <wtf/FastMalloc.h>
 #include <wtf/PassOwnPtr.h>
+#include <wtf/RefCountedArray.h>
 #include <wtf/RefPtr.h>
 #include <wtf/SegmentedVector.h>
 #include <wtf/Vector.h>
-#include "StructureStubInfo.h"
+#include <wtf/text/WTFString.h>
 
 namespace JSC {
 
-    class DFGCodeBlocks;
-    class ExecState;
-    class LLIntOffsetsExtractor;
+class ExecState;
+class LLIntOffsetsExtractor;
+class RepatchBuffer;
 
-    inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
+inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }
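[Editorial sketch, not part of the patch: the sign flip relative to the removed line is deliberate. VirtualRegister offsets for locals grow downward, so the register one local-index before the arguments register sits at offset() + 1. A worked example, assuming the localToOperand(local) == -1 - local mapping from VirtualRegister.h:

    // Illustrative: arguments register at local 5, i.e. offset -6.
    //   unmodified register = local 4 -> offset -5 == (-6) + 1
]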
 
-    static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
+static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
 
-    class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
-        WTF_MAKE_FAST_ALLOCATED;
-        friend class JIT;
-        friend class LLIntOffsetsExtractor;
-    public:
-        enum CopyParsedBlockTag { CopyParsedBlock };
-    protected:
-        CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable*);
+enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
+
+class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
+    WTF_MAKE_FAST_ALLOCATED;
+    friend class BytecodeLivenessAnalysis;
+    friend class JIT;
+    friend class LLIntOffsetsExtractor;
+public:
+    enum CopyParsedBlockTag { CopyParsedBlock };
+protected:
+    CodeBlock(CopyParsedBlockTag, CodeBlock& other);
         
-        CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);
+    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
 
-        WriteBarrier<JSGlobalObject> m_globalObject;
-        Heap* m_heap;
+    WriteBarrier<JSGlobalObject> m_globalObject;
+    Heap* m_heap;
 
-    public:
-        JS_EXPORT_PRIVATE virtual ~CodeBlock();
-        
-        int numParameters() const { return m_numParameters; }
-        void setNumParameters(int newValue);
-        void addParameter();
-        
-        int* addressOfNumParameters() { return &m_numParameters; }
-        static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
+public:
+    JS_EXPORT_PRIVATE virtual ~CodeBlock();
 
-        CodeBlock* alternative() { return m_alternative.get(); }
-        PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
-        void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
-        
-        CodeSpecializationKind specializationKind()
-        {
-            if (m_isConstructor)
-                return CodeForConstruct;
-            return CodeForCall;
-        }
-        
-#if ENABLE(JIT)
-        CodeBlock* baselineVersion()
-        {
-            CodeBlock* result = replacement();
-            if (!result)
-                return 0; // This can happen if we're in the process of creating the baseline version.
-            while (result->alternative())
-                result = result->alternative();
-            ASSERT(result);
-            ASSERT(JITCode::isBaselineCode(result->getJITType()));
-            return result;
-        }
-#endif
-        
-        void visitAggregate(SlotVisitor&);
+    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
 
-        static void dumpStatistics();
+    CString inferredName() const;
+    CodeBlockHash hash() const;
+    bool hasHash() const;
+    bool isSafeToComputeHash() const;
+    CString hashAsStringIfPossible() const;
+    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
+    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
+    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
+    void dump(PrintStream&) const;
 
-        void dump(ExecState*) const;
-        void printStructures(const Instruction*) const;
-        void printStructure(const char* name, const Instruction*, int operand) const;
+    int numParameters() const { return m_numParameters; }
+    void setNumParameters(int newValue);
 
-        bool isStrictMode() const { return m_isStrictMode; }
+    int* addressOfNumParameters() { return &m_numParameters; }
+    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
 
-        inline bool isKnownNotImmediate(int index)
-        {
-            if (index == m_thisRegister && !m_isStrictMode)
-                return true;
+    CodeBlock* alternative() { return m_alternative.get(); }
+    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
+    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
 
-            if (isConstantRegisterIndex(index))
-                return getConstant(index).isCell();
+    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
+    {
+        Functor f(std::forward<Functor>(functor));
+        Vector<CodeBlock*, 4> codeBlocks;
+        codeBlocks.append(this);
 
-            return false;
-        }
+        while (!codeBlocks.isEmpty()) {
+            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
+            f(currentCodeBlock);
 
-        ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
-        {
-            return index >= m_numVars;
+            if (CodeBlock* alternative = currentCodeBlock->alternative())
+                codeBlocks.append(alternative);
+            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
+                codeBlocks.append(osrEntryBlock);
         }
+    }
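[Editorial sketch, not part of the patch: forEachRelatedCodeBlock() above is a small worklist walk that visits this block, each alternative() in the chain, and any special OSR-entry block. A minimal caller, where countRelatedCodeBlocks is a hypothetical helper:

    // Hypothetical usage: count this CodeBlock plus everything related to it.
    static unsigned countRelatedCodeBlocks(CodeBlock* codeBlock)
    {
        unsigned count = 0;
        codeBlock->forEachRelatedCodeBlock([&] (CodeBlock*) { count++; });
        return count;
    }
]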
+    
+    CodeSpecializationKind specializationKind() const
+    {
+        return specializationFromIsConstruct(m_isConstructor);
+    }
+    
+    CodeBlock* baselineAlternative();
+    
+    // FIXME: Get rid of this.
+    // https://bugs.webkit.org/show_bug.cgi?id=123677
+    CodeBlock* baselineVersion();
 
-        HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
-        int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
-        void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
+    void visitAggregate(SlotVisitor&);
 
-#if ENABLE(JIT)
+    void dumpBytecode(PrintStream& = WTF::dataFile());
+    void dumpBytecode(
+        PrintStream&, unsigned bytecodeOffset,
+        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+    void printStructures(PrintStream&, const Instruction*);
+    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
 
-        StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
-        {
-            return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
-        }
+    bool isStrictMode() const { return m_isStrictMode; }
+    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
 
-        StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
-        {
-            return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
-        }
+    inline bool isKnownNotImmediate(int index)
+    {
+        if (index == m_thisRegister.offset() && !m_isStrictMode)
+            return true;
 
-        CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
-        {
-            return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
-        }
-        
-        CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
-        {
-            return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
-        }
+        if (isConstantRegisterIndex(index))
+            return getConstant(index).isCell();
 
-        MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
-        {
-            return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
-        }
+        return false;
+    }
 
-        MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex)
-        {
-            return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
-        }
+    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
+    {
+        return index >= m_numVars;
+    }
 
-        unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
+    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
+    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
+    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
+    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
+                                          int& startOffset, int& endOffset, unsigned& line, unsigned& column);
 
-        unsigned bytecodeOffsetForCallAtIndex(unsigned index)
-        {
-            if (!m_rareData)
-                return 1;
-            Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
-            if (!callIndices.size())
-                return 1;
-            ASSERT(index < m_rareData->m_callReturnIndexVector.size());
-            return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
-        }
+    void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
+    void getStubInfoMap(StubInfoMap& result);
+    
+    void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
+    void getCallLinkInfoMap(CallLinkInfoMap& result);
+    
+#if ENABLE(JIT)
+    StructureStubInfo* addStubInfo();
+    Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
+    Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }
 
-        void unlinkCalls();
-        
-        bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
-        
-        void linkIncomingCall(CallLinkInfo* incoming)
-        {
-            m_incomingCalls.push(incoming);
-        }
-#if ENABLE(LLINT)
-        void linkIncomingCall(LLIntCallLinkInfo* incoming)
-        {
-            m_incomingLLIntCalls.push(incoming);
-        }
-#endif // ENABLE(LLINT)
-        
-        void unlinkIncomingCalls();
+    void resetStub(StructureStubInfo&);
+    
+    ByValInfo& getByValInfo(unsigned bytecodeIndex)
+    {
+        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
+    }
+
+    CallLinkInfo* addCallLinkInfo();
+    Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
+    Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
+
+    // This is a slow function call used primarily for compiling OSR exits in the case
+    // that there had been inlining. Chances are if you want to use this, you're really
+    // looking for a CallLinkInfoMap to amortize the cost of calling this.
+    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
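[Editorial sketch, not part of the patch: as the comment above says, repeated per-bytecode lookups should instead build a CallLinkInfoMap once via getCallLinkInfoMap(). The amortized pattern looks roughly like this; the index container and processCallSite() are illustrative stand-ins, and keying the map by CodeOrigin is an assumption here:

    // Hypothetical: one map build, then cheap lookups per bytecode index.
    CallLinkInfoMap map;
    codeBlock->getCallLinkInfoMap(map);
    for (unsigned bytecodeIndex : bytecodeIndicesOfInterest) // assumed container
        processCallSite(map.get(CodeOrigin(bytecodeIndex)));  // assumed helper
]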
 #endif // ENABLE(JIT)
 
-#if ENABLE(DFG_JIT) || ENABLE(LLINT)
-        void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
-        {
-            m_jitCodeMap = jitCodeMap;
-        }
-        CompactJITCodeMap* jitCodeMap()
-        {
-            return m_jitCodeMap.get();
-        }
-#endif
-        
-#if ENABLE(DFG_JIT)
-        void createDFGDataIfNecessary()
-        {
-            if (!!m_dfgData)
-                return;
-            
-            m_dfgData = adoptPtr(new DFGData);
-        }
-        
-        DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
-        {
-            createDFGDataIfNecessary();
-            DFG::OSREntryData entry;
-            entry.m_bytecodeIndex = bytecodeIndex;
-            entry.m_machineCodeOffset = machineCodeOffset;
-            m_dfgData->osrEntry.append(entry);
-            return &m_dfgData->osrEntry.last();
-        }
-        unsigned numberOfDFGOSREntries() const
-        {
-            if (!m_dfgData)
-                return 0;
-            return m_dfgData->osrEntry.size();
-        }
-        DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
-        DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
-        {
-            return binarySearch<DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(), bytecodeIndex);
-        }
-        
-        void appendOSRExit(const DFG::OSRExit& osrExit)
-        {
-            createDFGDataIfNecessary();
-            m_dfgData->osrExit.append(osrExit);
-        }
-        
-        DFG::OSRExit& lastOSRExit()
-        {
-            return m_dfgData->osrExit.last();
-        }
-        
-        void appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
-        {
-            createDFGDataIfNecessary();
-            m_dfgData->speculationRecovery.append(recovery);
-        }
-        
-        unsigned numberOfOSRExits()
-        {
-            if (!m_dfgData)
-                return 0;
-            return m_dfgData->osrExit.size();
-        }
-        
-        unsigned numberOfSpeculationRecoveries()
-        {
-            if (!m_dfgData)
-                return 0;
-            return m_dfgData->speculationRecovery.size();
-        }
-        
-        DFG::OSRExit& osrExit(unsigned index)
-        {
-            return m_dfgData->osrExit[index];
-        }
-        
-        DFG::SpeculationRecovery& speculationRecovery(unsigned index)
-        {
-            return m_dfgData->speculationRecovery[index];
-        }
-        
-        void appendWeakReference(JSCell* target)
-        {
-            createDFGDataIfNecessary();
-            m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
-        }
-        
-        void shrinkWeakReferencesToFit()
-        {
-            if (!m_dfgData)
-                return;
-            m_dfgData->weakReferences.shrinkToFit();
-        }
+    void unlinkIncomingCalls();
+
+#if ENABLE(JIT)
+    void unlinkCalls();
         
-        void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
-        {
-            createDFGDataIfNecessary();
-            m_dfgData->transitions.append(
-                WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
-        }
+    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
         
-        void shrinkWeakReferenceTransitionsToFit()
-        {
-            if (!m_dfgData)
-                return;
-            m_dfgData->transitions.shrinkToFit();
-        }
-#endif
+    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
+    {
+        return m_incomingCalls.isOnList(incoming);
+    }
+#endif // ENABLE(JIT)
 
-        unsigned bytecodeOffset(Instruction* returnAddress)
-        {
-            ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
-            return static_cast<Instruction*>(returnAddress) - instructions().begin();
-        }
+    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
+
+    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
+    {
+        m_jitCodeMap = jitCodeMap;
+    }
+    CompactJITCodeMap* jitCodeMap()
+    {
+        return m_jitCodeMap.get();
+    }
+    
+    unsigned bytecodeOffset(Instruction* returnAddress)
+    {
+        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
+        return static_cast<Instruction*>(returnAddress) - instructions().begin();
+    }
 
-        void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
-        bool isNumericCompareFunction() { return m_isNumericCompareFunction; }
+    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
 
-        unsigned numberOfInstructions() const { return m_instructions.size(); }
-        RefCountedArray<Instruction>& instructions() { return m_instructions; }
-        const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
-        
-        size_t predictedMachineCodeSize();
-        
-        bool usesOpcode(OpcodeID);
+    unsigned numberOfInstructions() const { return m_instructions.size(); }
+    RefCountedArray<Instruction>& instructions() { return m_instructions; }
+    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
 
-        unsigned instructionCount() { return m_instructions.size(); }
+    size_t predictedMachineCodeSize();
 
+    bool usesOpcode(OpcodeID);
+
+    unsigned instructionCount() const { return m_instructions.size(); }
+
+    int argumentIndexAfterCapture(size_t argument);
+    
+    bool hasSlowArguments();
+    const SlowArgument* machineSlowArguments();
+
+    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
+    void install();
+    
+    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
+    PassRefPtr<CodeBlock> newReplacement();
+    
+    void setJITCode(PassRefPtr<JITCode> code)
+    {
+        ASSERT(m_heap->isDeferred());
+        m_heap->reportExtraMemoryCost(code->size());
+        ConcurrentJITLocker locker(m_lock);
+        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
+        m_jitCode = code;
+    }
+    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
+    JITCode::JITType jitType() const
+    {
+        JITCode* jitCode = m_jitCode.get();
+        WTF::loadLoadFence();
+        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
+        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
+        return result;
+    }
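[Editorial sketch, not part of the patch: setJITCode() and jitType() pair a store-store fence on publication with a load-load fence on consumption, the standard publish/consume ordering. The same shape in miniature, where Object, g_shared, and use() are illustrative names:

    // Publisher (cf. setJITCode): initialize fields, fence, then publish.
    object->initialize();
    WTF::storeStoreFence();
    g_shared = object;

    // Consumer (cf. jitType): load the pointer, fence, then read through it.
    Object* seen = g_shared;
    WTF::loadLoadFence();
    if (seen)
        use(seen->field);
]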
+
+    bool hasBaselineJITProfiling() const
+    {
+        return jitType() == JITCode::BaselineJIT;
+    }
+    
 #if ENABLE(JIT)
-        void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
-        {
-            m_jitCode = code;
-            m_jitCodeWithArityCheck = codeWithArityCheck;
-#if ENABLE(DFG_JIT)
-            if (m_jitCode.jitType() == JITCode::DFGJIT) {
-                createDFGDataIfNecessary();
-                m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
-            }
-#endif
-        }
-        JITCode& getJITCode() { return m_jitCode; }
-        MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
-        JITCode::JITType getJITType() { return m_jitCode.jitType(); }
-        ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
-        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
-        virtual void jettison() = 0;
-        enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
-        JITCompilationResult jitCompile(JSGlobalData& globalData)
-        {
-            if (getJITType() != JITCode::InterpreterThunk) {
-                ASSERT(getJITType() == JITCode::BaselineJIT);
-                return AlreadyCompiled;
-            }
-#if ENABLE(JIT)
-            if (jitCompileImpl(globalData))
-                return CompiledSuccessfully;
-            return CouldNotCompile;
-#else
-            UNUSED_PARAM(globalData);
-            return CouldNotCompile;
+    virtual CodeBlock* replacement() = 0;
+
+    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
+    DFG::CapabilityLevel capabilityLevel();
+    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
+
+    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
+    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
 #endif
-        }
-        virtual CodeBlock* replacement() = 0;
 
-        enum CompileWithDFGState {
-            CompileWithDFGFalse,
-            CompileWithDFGTrue,
-            CompileWithDFGUnset
-        };
+    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization);
+    
+    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
 
-        virtual bool canCompileWithDFGInternal() = 0;
-        bool canCompileWithDFG()
-        {
-            bool result = canCompileWithDFGInternal();
-            m_canCompileWithDFGState = result ? CompileWithDFGTrue : CompileWithDFGFalse;
-            return result;
-        }
-        CompileWithDFGState canCompileWithDFGState() { return m_canCompileWithDFGState; }
+    void setVM(VM* vm) { m_vm = vm; }
+    VM* vm() { return m_vm; }
 
-        bool hasOptimizedReplacement()
-        {
-            ASSERT(JITCode::isBaselineCode(getJITType()));
-            bool result = replacement()->getJITType() > getJITType();
-#if !ASSERT_DISABLED
-            if (result)
-                ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
-            else {
-                ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
-                ASSERT(replacement() == this);
-            }
-#endif
-            return result;
-        }
-#else
-        JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
-#endif
+    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
+    VirtualRegister thisRegister() const { return m_thisRegister; }
 
-        ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
+    bool usesEval() const { return m_unlinkedCode->usesEval(); }
 
-        void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
-        JSGlobalData* globalData() { return m_globalData; }
+    void setArgumentsRegister(VirtualRegister argumentsRegister)
+    {
+        ASSERT(argumentsRegister.isValid());
+        m_argumentsRegister = argumentsRegister;
+        ASSERT(usesArguments());
+    }
+    VirtualRegister argumentsRegister() const
+    {
+        ASSERT(usesArguments());
+        return m_argumentsRegister;
+    }
+    VirtualRegister uncheckedArgumentsRegister()
+    {
+        if (!usesArguments())
+            return VirtualRegister();
+        return argumentsRegister();
+    }
+    void setActivationRegister(VirtualRegister activationRegister)
+    {
+        m_activationRegister = activationRegister;
+    }
 
-        void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
-        int thisRegister() const { return m_thisRegister; }
+    VirtualRegister activationRegister() const
+    {
+        ASSERT(m_activationRegister.isValid());
+        return m_activationRegister;
+    }
 
-        void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
-        bool needsFullScopeChain() const { return m_needsFullScopeChain; }
-        void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
-        bool usesEval() const { return m_usesEval; }
-        
-        void setArgumentsRegister(int argumentsRegister)
-        {
-            ASSERT(argumentsRegister != -1);
-            m_argumentsRegister = argumentsRegister;
-            ASSERT(usesArguments());
-        }
-        int argumentsRegister()
-        {
-            ASSERT(usesArguments());
-            return m_argumentsRegister;
-        }
-        void setActivationRegister(int activationRegister)
-        {
-            m_activationRegister = activationRegister;
-        }
-        int activationRegister()
-        {
-            ASSERT(needsFullScopeChain());
-            return m_activationRegister;
-        }
-        bool usesArguments() const { return m_argumentsRegister != -1; }
+    VirtualRegister uncheckedActivationRegister()
+    {
+        return m_activationRegister;
+    }
 
-        CodeType codeType() const { return m_codeType; }
+    bool usesArguments() const { return m_argumentsRegister.isValid(); }
 
-        SourceProvider* source() const { return m_source.get(); }
-        unsigned sourceOffset() const { return m_sourceOffset; }
+    bool needsActivation() const
+    {
+        ASSERT(m_activationRegister.isValid() == m_needsActivation);
+        return m_needsActivation;
+    }
+    
+    unsigned captureCount() const
+    {
+        if (!symbolTable())
+            return 0;
+        return symbolTable()->captureCount();
+    }
+    
+    int captureStart() const
+    {
+        if (!symbolTable())
+            return 0;
+        return symbolTable()->captureStart();
+    }
+    
+    int captureEnd() const
+    {
+        if (!symbolTable())
+            return 0;
+        return symbolTable()->captureEnd();
+    }
 
-        size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
-        void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
-        unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
-        unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
+    bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
+    
+    int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
+    int framePointerOffsetToGetActivationRegisters();
 
-        void createActivation(CallFrame*);
+    CodeType codeType() const { return m_unlinkedCode->codeType(); }
+    PutPropertySlot::Context putByIdContext() const
+    {
+        if (codeType() == EvalCode)
+            return PutPropertySlot::PutByIdEval;
+        return PutPropertySlot::PutById;
+    }
 
-        void clearEvalCache();
+    SourceProvider* source() const { return m_source.get(); }
+    unsigned sourceOffset() const { return m_sourceOffset; }
+    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }
 
-        void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
-        {
-            m_propertyAccessInstructions.append(propertyAccessInstruction);
-        }
-        void addGlobalResolveInstruction(unsigned globalResolveInstruction)
-        {
-            m_globalResolveInstructions.append(globalResolveInstruction);
-        }
-        bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
-#if ENABLE(LLINT)
-        LLIntCallLinkInfo* addLLIntCallLinkInfo()
-        {
-            m_llintCallLinkInfos.append(LLIntCallLinkInfo());
-            return &m_llintCallLinkInfos.last();
-        }
-#endif
-#if ENABLE(JIT)
-        void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
-        size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
-        StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
+    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
+    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
 
-        void addGlobalResolveInfo(unsigned globalResolveInstruction)
-        {
-            m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
-        }
-        GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
-        bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);
+    void clearEvalCache();
 
-        void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
-        size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
-        CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
+    String nameForRegister(VirtualRegister);
 
-        void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
-        MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
-        size_t numberOfMethodCallLinkInfos() { return m_methodCallLinkInfos.size(); }
-#endif
-        
-#if ENABLE(VALUE_PROFILER)
-        unsigned numberOfArgumentValueProfiles()
-        {
-            ASSERT(m_numParameters >= 0);
-            ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
-            return m_argumentValueProfiles.size();
-        }
-        ValueProfile* valueProfileForArgument(unsigned argumentIndex)
-        {
-            ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
-            ASSERT(result->m_bytecodeOffset == -1);
-            return result;
-        }
-        
-        ValueProfile* addValueProfile(int bytecodeOffset)
-        {
-            ASSERT(bytecodeOffset != -1);
-            ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset);
-            m_valueProfiles.append(ValueProfile(bytecodeOffset));
-            return &m_valueProfiles.last();
-        }
-        unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
-        ValueProfile* valueProfile(int index)
-        {
-            ValueProfile* result = &m_valueProfiles[index];
-            ASSERT(result->m_bytecodeOffset != -1);
-            return result;
-        }
-        ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
-        {
-            ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
-            ASSERT(result->m_bytecodeOffset != -1);
-            ASSERT(instructions()[bytecodeOffset + opcodeLength(
-                       m_globalData->interpreter->getOpcodeID(
-                           instructions()[
-                               bytecodeOffset].u.opcode)) - 1].u.profile == result);
-            return result;
-        }
-        PredictedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
-        {
-            return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
-        }
-        
-        unsigned totalNumberOfValueProfiles()
-        {
-            return numberOfArgumentValueProfiles() + numberOfValueProfiles();
-        }
-        ValueProfile* getFromAllValueProfiles(unsigned index)
-        {
-            if (index < numberOfArgumentValueProfiles())
-                return valueProfileForArgument(index);
-            return valueProfile(index - numberOfArgumentValueProfiles());
-        }
-        
-        RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
-        {
-            m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
-            return &m_rareCaseProfiles.last();
-        }
-        unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
-        RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
-        RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
-        {
-            return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
-        }
-        
-        bool likelyToTakeSlowCase(int bytecodeOffset)
-        {
-            if (!numberOfRareCaseProfiles())
-                return false;
-            unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
-        }
-        
-        bool couldTakeSlowCase(int bytecodeOffset)
-        {
-            if (!numberOfRareCaseProfiles())
-                return false;
-            unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
-        }
-        
-        RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
-        {
-            m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
-            return &m_specialFastCaseProfiles.last();
-        }
-        unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
-        RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
-        RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
-        {
-            return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
-        }
-        
-        bool likelyToTakeSpecialFastCase(int bytecodeOffset)
-        {
-            if (!numberOfRareCaseProfiles())
-                return false;
-            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
-        }
-        
-        bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
-        {
-            if (!numberOfRareCaseProfiles())
-                return false;
-            unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            unsigned value = slowCaseCount - specialFastCaseCount;
-            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
-        }
-        
-        bool likelyToTakeAnySlowCase(int bytecodeOffset)
-        {
-            if (!numberOfRareCaseProfiles())
-                return false;
-            unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-            unsigned value = slowCaseCount + specialFastCaseCount;
-            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
-        }
-        
-        unsigned executionEntryCount() const { return m_executionEntryCount; }
+#if ENABLE(JIT)
+    void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
+    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
+    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
 #endif
 
-        unsigned globalResolveInfoCount() const
-        {
-#if ENABLE(JIT)    
-            if (m_globalData->canUseJIT())
-                return m_globalResolveInfos.size();
-#endif
-            return 0;
-        }
+    unsigned numberOfArgumentValueProfiles()
+    {
+        ASSERT(m_numParameters >= 0);
+        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
+        return m_argumentValueProfiles.size();
+    }
+    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
+    {
+        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
+        ASSERT(result->m_bytecodeOffset == -1);
+        return result;
+    }
 
-        // Exception handling support
+    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
+    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
+    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
+    {
+        ValueProfile* result = binarySearch<ValueProfile, int>(
+            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
+            getValueProfileBytecodeOffset<ValueProfile>);
+        ASSERT(result->m_bytecodeOffset != -1);
+        ASSERT(instructions()[bytecodeOffset + opcodeLength(
+            m_vm->interpreter->getOpcodeID(
+                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
+        return result;
+    }
+    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
+    {
+        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
+    }
 
-        size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
-        void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
-        HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
+    unsigned totalNumberOfValueProfiles()
+    {
+        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
+    }
+    ValueProfile* getFromAllValueProfiles(unsigned index)
+    {
+        if (index < numberOfArgumentValueProfiles())
+            return valueProfileForArgument(index);
+        return valueProfile(index - numberOfArgumentValueProfiles());
+    }
 
-        void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
-        {
-            createRareDataIfNecessary();
-            m_rareData->m_expressionInfo.append(expressionInfo);
-        }
+    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
+    {
+        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+        return &m_rareCaseProfiles.last();
+    }
+    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
+    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
+    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
 
-        void addLineInfo(unsigned bytecodeOffset, int lineNo)
-        {
-            createRareDataIfNecessary();
-            Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
-            if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
-                LineInfo info = { bytecodeOffset, lineNo };
-                lineInfo.append(info);
-            }
-        }
+    bool likelyToTakeSlowCase(int bytecodeOffset)
+    {
+        if (!hasBaselineJITProfiling())
+            return false;
+        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        return value >= Options::likelyToTakeSlowCaseMinimumCount();
+    }
 
-        bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
-        bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
-        // We only generate exception handling info if the user is debugging
-        // (and may want line number info), or if the function contains an exception handler.
-        bool needsCallReturnIndices()
-        {
-            return m_rareData &&
-                (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
-        }
+    bool couldTakeSlowCase(int bytecodeOffset)
+    {
+        if (!hasBaselineJITProfiling())
+            return false;
+        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        return value >= Options::couldTakeSlowCaseMinimumCount();
+    }
 
-#if ENABLE(JIT)
-        Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
-        {
-            createRareDataIfNecessary();
-            return m_rareData->m_callReturnIndexVector;
-        }
-#endif
+    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
+    {
+        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+        return &m_specialFastCaseProfiles.last();
+    }
+    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
+    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
+    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
+    {
+        return tryBinarySearch<RareCaseProfile, int>(
+            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
+            getRareCaseProfileBytecodeOffset);
+    }
+
+    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
+    {
+        if (!hasBaselineJITProfiling())
+            return false;
+        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
+    }
+
+    bool couldTakeSpecialFastCase(int bytecodeOffset)
+    {
+        if (!hasBaselineJITProfiling())
+            return false;
+        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
+    }
+
+    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
+    {
+        if (!hasBaselineJITProfiling())
+            return false;
+        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        unsigned value = slowCaseCount - specialFastCaseCount;
+        return value >= Options::likelyToTakeSlowCaseMinimumCount();
+    }
+
+    bool likelyToTakeAnySlowCase(int bytecodeOffset)
+    {
+        if (!hasBaselineJITProfiling())
+            return false;
+        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        unsigned value = slowCaseCount + specialFastCaseCount;
+        return value >= Options::likelyToTakeSlowCaseMinimumCount();
+    }
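[Editorial sketch, not part of the patch: the predicates above combine the two baseline counters in different ways: the slow count alone, slow minus special-fast (deepest), and slow plus special-fast (any). A worked example with illustrative numbers, assuming a minimum count of 100:

    // slowCaseCount = 120, specialFastCaseCount = 100 (illustrative):
    //   likelyToTakeSlowCase:        120       >= 100 -> true
    //   likelyToTakeDeepestSlowCase: 120 - 100 >= 100 -> false (20 < 100)
    //   likelyToTakeAnySlowCase:     120 + 100 >= 100 -> true  (220)
]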
+
+    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
+    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
+    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
+    {
+        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
+        return &m_arrayProfiles.last();
+    }
+    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
+    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
+
+    // Exception handling support
+
+    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
+    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
+
+    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
 
 #if ENABLE(DFG_JIT)
-        SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
-        {
-            createRareDataIfNecessary();
-            return m_rareData->m_inlineCallFrames;
-        }
-        
-        Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
-        {
-            createRareDataIfNecessary();
-            return m_rareData->m_codeOrigins;
-        }
-        
-        // Having code origins implies that there has been some inlining.
-        bool hasCodeOrigins()
-        {
-            return m_rareData && !!m_rareData->m_codeOrigins.size();
-        }
-        
-        bool codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
-        {
-            if (!hasCodeOrigins())
-                return false;
-            unsigned offset = getJITCode().offsetOf(returnAddress.value());
-            CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
-            if (entry->callReturnOffset != offset)
-                return false;
-            codeOrigin = entry->codeOrigin;
-            return true;
-        }
-        
-        CodeOrigin codeOrigin(unsigned index)
-        {
-            ASSERT(m_rareData);
-            return m_rareData->m_codeOrigins[index].codeOrigin;
-        }
+    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
+    {
+        return m_jitCode->dfgCommon()->codeOrigins;
+    }
+    
+    // Having code origins implies that there has been some inlining.
+    bool hasCodeOrigins()
+    {
+        return JITCode::isOptimizingJIT(jitType());
+    }
         
-        bool addFrequentExitSite(const DFG::FrequentExitSite& site)
-        {
-            ASSERT(JITCode::isBaselineCode(getJITType()));
-            return m_exitProfile.add(site);
-        }
+    bool canGetCodeOrigin(unsigned index)
+    {
+        if (!hasCodeOrigins())
+            return false;
+        return index < codeOrigins().size();
+    }
 
-        DFG::ExitProfile& exitProfile() { return m_exitProfile; }
-        
-        CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
-        {
-            return m_lazyOperandValueProfiles;
-        }
+    CodeOrigin codeOrigin(unsigned index)
+    {
+        return codeOrigins()[index];
+    }
+
+    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
+    {
+        ASSERT(JITCode::isBaselineCode(jitType()));
+        ConcurrentJITLocker locker(m_lock);
+        return m_exitProfile.add(locker, site);
+    }
+
+    bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
+    {
+        return m_exitProfile.hasExitSite(locker, site);
+    }
+    bool hasExitSite(const DFG::FrequentExitSite& site) const
+    {
+        ConcurrentJITLocker locker(m_lock);
+        return hasExitSite(locker, site);
+    }
+
+    DFG::ExitProfile& exitProfile() { return m_exitProfile; }
+
+    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
+    {
+        return m_lazyOperandValueProfiles;
+    }
+#endif // ENABLE(DFG_JIT)
+
+    // Constant Pool
+#if ENABLE(DFG_JIT)
+    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
+    size_t numberOfDFGIdentifiers() const
+    {
+        if (!JITCode::isOptimizingJIT(jitType()))
+            return 0;
+
+        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
+    }
+
+    const Identifier& identifier(int index) const
+    {
+        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
+        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
+            return m_unlinkedCode->identifier(index);
+        ASSERT(JITCode::isOptimizingJIT(jitType()));
+        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
+    }
+#else
+    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
+    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
 #endif
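[Editorial sketch, not part of the patch: identifier indices thus form a two-segment space: indices below the unlinked count resolve in the UnlinkedCodeBlock, and the remainder index into the DFG's dfgIdentifiers. A worked example with an illustrative count:

    // Assuming m_unlinkedCode->numberOfIdentifiers() == 10:
    //   identifier(3)  -> m_unlinkedCode->identifier(3)
    //   identifier(12) -> m_jitCode->dfgCommon()->dfgIdentifiers[12 - 10]
]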
 
-        // Constant Pool
+    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
+    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
+    unsigned addConstant(JSValue v)
+    {
+        unsigned result = m_constantRegisters.size();
+        m_constantRegisters.append(WriteBarrier<Unknown>());
+        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
+        return result;
+    }
 
-        size_t numberOfIdentifiers() const { return m_identifiers.size(); }
-        void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
-        Identifier& identifier(int index) { return m_identifiers[index]; }
+    unsigned addConstantLazily()
+    {
+        unsigned result = m_constantRegisters.size();
+        m_constantRegisters.append(WriteBarrier<Unknown>());
+        return result;
+    }
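[Editorial sketch, not part of the patch: addConstant() stores the value immediately through the write barrier, while addConstantLazily() only reserves the slot, for callers that produce the value later. A hypothetical sketch of the deferred fill-in, with vm and knownValue assumed to be in scope:

    unsigned index = codeBlock->addConstantLazily(); // slot reserved, still empty
    // ... later, once the value exists:
    codeBlock->constants()[index].set(vm, codeBlock->ownerExecutable(), knownValue);
]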
 
-        size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
-        unsigned addConstant(JSValue v)
-        {
-            unsigned result = m_constantRegisters.size();
-            m_constantRegisters.append(WriteBarrier<Unknown>());
-            m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
-            return result;
-        }
-        unsigned addOrFindConstant(JSValue);
-        WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
-        ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
-        ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+    bool findConstant(JSValue, unsigned& result);
+    unsigned addOrFindConstant(JSValue);
+    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
+    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
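[Editorial sketch, not part of the patch: constant registers occupy operand indices at and above FirstConstantRegisterIndex, so getConstant(index) reads m_constantRegisters[index - FirstConstantRegisterIndex]. Illustrative arithmetic:

    int operand = FirstConstantRegisterIndex + 2; // names m_constantRegisters[2]
    ASSERT(codeBlock->isConstantRegisterIndex(operand));
    JSValue value = codeBlock->getConstant(operand);
]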
 
-        unsigned addFunctionDecl(FunctionExecutable* n)
-        {
-            unsigned size = m_functionDecls.size();
-            m_functionDecls.append(WriteBarrier<FunctionExecutable>());
-            m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
-            return size;
-        }
-        FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
-        int numberOfFunctionDecls() { return m_functionDecls.size(); }
-        unsigned addFunctionExpr(FunctionExecutable* n)
-        {
-            unsigned size = m_functionExprs.size();
-            m_functionExprs.append(WriteBarrier<FunctionExecutable>());
-            m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
-            return size;
-        }
-        FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
+    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
+    int numberOfFunctionDecls() { return m_functionDecls.size(); }
+    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
 
-        unsigned addRegExp(RegExp* r)
-        {
-            createRareDataIfNecessary();
-            unsigned size = m_rareData->m_regexps.size();
-            m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
-            return size;
-        }
-        unsigned numberOfRegExps() const
-        {
-            if (!m_rareData)
-                return 0;
-            return m_rareData->m_regexps.size();
-        }
-        RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
+    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
 
-        unsigned addConstantBuffer(unsigned length)
-        {
-            createRareDataIfNecessary();
-            unsigned size = m_rareData->m_constantBuffers.size();
-            m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
-            return size;
-        }
+    unsigned numberOfConstantBuffers() const
+    {
+        if (!m_rareData)
+            return 0;
+        return m_rareData->m_constantBuffers.size();
+    }
+    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
+    {
+        createRareDataIfNecessary();
+        unsigned size = m_rareData->m_constantBuffers.size();
+        m_rareData->m_constantBuffers.append(buffer);
+        return size;
+    }
+
+    Vector<JSValue>& constantBufferAsVector(unsigned index)
+    {
+        ASSERT(m_rareData);
+        return m_rareData->m_constantBuffers[index];
+    }
+    JSValue* constantBuffer(unsigned index)
+    {
+        return constantBufferAsVector(index).data();
+    }
 
-        JSValue* constantBuffer(unsigned index)
+    Heap* heap() const { return m_heap; }
+    JSGlobalObject* globalObject() { return m_globalObject.get(); }
+
+    JSGlobalObject* globalObjectFor(CodeOrigin);
+
+    BytecodeLivenessAnalysis& livenessAnalysis()
+    {
         {
-            ASSERT(m_rareData);
-            return m_rareData->m_constantBuffers[index].data();
+            ConcurrentJITLocker locker(m_lock);
+            if (!!m_livenessAnalysis)
+                return *m_livenessAnalysis;
         }
-
-        JSGlobalObject* globalObject() { return m_globalObject.get(); }
-        
-        JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
+        std::unique_ptr<BytecodeLivenessAnalysis> analysis =
+            std::make_unique<BytecodeLivenessAnalysis>(this);
         {
-            if (!codeOrigin.inlineCallFrame)
-                return globalObject();
-            // FIXME: if we ever inline based on executable not function, this code will need to change.
-            return codeOrigin.inlineCallFrame->callee->scope()->globalObject.get();
+            ConcurrentJITLocker locker(m_lock);
+            if (!m_livenessAnalysis)
+                m_livenessAnalysis = WTF::move(analysis);
+            return *m_livenessAnalysis;
         }
+    }
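[Editorial sketch, not part of the patch: livenessAnalysis() above is a check-lock-check: probe under the lock, build the expensive analysis with the lock released, then install it only if no other thread won the race. The pattern in miniature, where Thing and m_thing are illustrative:

    {
        ConcurrentJITLocker locker(m_lock);
        if (m_thing)
            return *m_thing; // fast path: already built
    }
    auto fresh = std::make_unique<Thing>(this); // built without holding the lock
    {
        ConcurrentJITLocker locker(m_lock);
        if (!m_thing)
            m_thing = WTF::move(fresh); // a racing loser's copy is discarded
        return *m_thing;
    }
]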
+    
+    void validate();
 
-        // Jump Tables
+    // Jump Tables
 
-        size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
-        SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
-        SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
+    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
+    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
+    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
+    void clearSwitchJumpTables()
+    {
+        if (!m_rareData)
+            return;
+        m_rareData->m_switchJumpTables.clear();
+    }
 
-        size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
-        SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
-        SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
+    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
+    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
+    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
 
-        size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
-        StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
-        StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
 
+    SymbolTable* symbolTable() const { return m_symbolTable.get(); }
 
-        SymbolTable* symbolTable() { return m_symbolTable; }
-        SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }
+    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
 
-        EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
+    enum ShrinkMode {
+        // Shrink prior to generating machine code that may point directly into vectors.
+        EarlyShrink,
 
-        void shrinkToFit();
-        
-        void copyPostParseDataFrom(CodeBlock* alternative);
-        void copyPostParseDataFromAlternative();
-        
-        // Functions for controlling when JITting kicks in, in a mixed mode
-        // execution world.
-        
-        bool checkIfJITThresholdReached()
-        {
-            return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
-        }
-        
-        void dontJITAnytimeSoon()
-        {
-            m_llintExecuteCounter.deferIndefinitely();
-        }
-        
-        void jitAfterWarmUp()
-        {
-            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp, this);
-        }
-        
-        void jitSoon()
-        {
-            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon, this);
-        }
-        
-        int32_t llintExecuteCounter() const
-        {
-            return m_llintExecuteCounter.m_counter;
-        }
-        
-        // Functions for controlling when tiered compilation kicks in. This
-        // controls both when the optimizing compiler is invoked and when OSR
-        // entry happens. Two triggers exist: the loop trigger and the return
-        // trigger. In either case, when an addition to m_jitExecuteCounter
-        // causes it to become non-negative, the optimizing compiler is
-        // invoked. This includes a fast check to see if this CodeBlock has
-        // already been optimized (i.e. replacement() returns a CodeBlock
-        // that was optimized with a higher tier JIT than this one). In the
-        // case of the loop trigger, if the optimized compilation succeeds
-        // (or has already succeeded in the past) then OSR is attempted to
-        // redirect program flow into the optimized code.
-        
-        // These functions are called from within the optimization triggers,
-        // and are used as a single point at which we define the heuristics
-        // for how much warm-up is mandated before the next optimization
-        // trigger files. All CodeBlocks start out with optimizeAfterWarmUp(),
-        // as this is called from the CodeBlock constructor.
-        
-        // When we observe a lot of speculation failures, we trigger a
-        // reoptimization. But each time, we increase the optimization trigger
-        // to avoid thrashing.
-        unsigned reoptimizationRetryCounter() const
-        {
-            ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
-            return m_reoptimizationRetryCounter;
-        }
-        
-        void countReoptimization()
-        {
-            m_reoptimizationRetryCounter++;
-            if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
-                m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
-        }
-        
-        int32_t counterValueForOptimizeAfterWarmUp()
-        {
-            return Options::thresholdForOptimizeAfterWarmUp << reoptimizationRetryCounter();
-        }
-        
-        int32_t counterValueForOptimizeAfterLongWarmUp()
-        {
-            return Options::thresholdForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
-        }
-        
-        int32_t* addressOfJITExecuteCounter()
-        {
-            return &m_jitExecuteCounter.m_counter;
-        }
-        
-        static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
-        static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
-        static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
+        // Shrink after generating machine code, and after possibly creating new vectors
+        // and appending to others. At this time it is not safe to shrink certain vectors
+        // because we would have generated machine code that references them directly.
+        LateShrink
+    };
+    void shrinkToFit(ShrinkMode);
 
-        int32_t jitExecuteCounter() const { return m_jitExecuteCounter.m_counter; }
-        
-        unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
-        
-        // Check if the optimization threshold has been reached, and if not,
-        // adjust the heuristics accordingly. Returns true if the threshold has
-        // been reached.
-        bool checkIfOptimizationThresholdReached()
-        {
-            return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
-        }
-        
-        // Call this to force the next optimization trigger to fire. This is
-        // rarely wise, since optimization triggers are typically more
-        // expensive than executing baseline code.
-        void optimizeNextInvocation()
-        {
-            m_jitExecuteCounter.setNewThreshold(0, this);
-        }
-        
-        // Call this to prevent optimization from happening again. Note that
-        // optimization will still happen after roughly 2^29 invocations,
-        // so this is really meant to delay that as much as possible. This
-        // is called if optimization failed, and we expect it to fail in
-        // the future as well.
-        void dontOptimizeAnytimeSoon()
-        {
-            m_jitExecuteCounter.deferIndefinitely();
-        }
-        
-        // Call this to reinitialize the counter to its starting state,
-        // forcing a warm-up to happen before the next optimization trigger
-        // fires. This is called in the CodeBlock constructor. It also
-        // makes sense to call this if an OSR exit occurred. Note that
-        // OSR exit code is code generated, so the value of the execute
-        // counter that this corresponds to is also available directly.
-        void optimizeAfterWarmUp()
-        {
-            m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
-        }
-        
-        // Call this to force an optimization trigger to fire only after
-        // a lot of warm-up.
-        void optimizeAfterLongWarmUp()
-        {
-            m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
-        }
-        
-        // Call this to cause an optimization trigger to fire soon, but
-        // not necessarily the next one. This makes sense if optimization
-        // succeeds. Successfuly optimization means that all calls are
-        // relinked to the optimized code, so this only affects call
-        // frames that are still executing this CodeBlock. The value here
-        // is tuned to strike a balance between the cost of OSR entry
-        // (which is too high to warrant making every loop back edge to
-        // trigger OSR immediately) and the cost of executing baseline
-        // code (which is high enough that we don't necessarily want to
-        // have a full warm-up). The intuition for calling this instead of
-        // optimizeNextInvocation() is for the case of recursive functions
-        // with loops. Consider that there may be N call frames of some
-        // recursive function, for a reasonably large value of N. The top
-        // one triggers optimization, and then returns, and then all of
-        // the others return. We don't want optimization to be triggered on
-        // each return, as that would be superfluous. It only makes sense
-        // to trigger optimization if one of those functions becomes hot
-        // in the baseline code.
-        void optimizeSoon()
-        {
-            m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this);
-        }
-        
-        // The speculative JIT tracks its success rate, so that we can
-        // decide when to reoptimize. It's interesting to note that these
-        // counters may overflow without any protection. The success
-        // counter will overflow before the fail one does, becuase the
-        // fail one is used as a trigger to reoptimize. So the worst case
-        // is that the success counter overflows and we reoptimize without
-        // needing to. But this is harmless. If a method really did
-        // execute 2^32 times then compiling it again probably won't hurt
-        // anyone.
-        
-        void countSpeculationSuccess()
-        {
-            m_speculativeSuccessCounter++;
-        }
-        
-        void countSpeculationFailure()
-        {
-            m_speculativeFailCounter++;
-        }
-        
-        uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
-        uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
-        uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; }
-        
-        uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
-        uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
-        uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; }
-        
-        static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
-        static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
-        static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); }
+    // Functions for controlling when JITting kicks in, in a mixed mode
+    // execution world.
 
-#if ENABLE(JIT)
-        // The number of failures that triggers the use of the ratio.
-        unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
-        unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }
+    bool checkIfJITThresholdReached()
+    {
+        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
+    }
 
-        bool shouldReoptimizeNow()
-        {
-            return (Options::desiredSpeculativeSuccessFailRatio *
-                        speculativeFailCounter() >= speculativeSuccessCounter()
-                    && speculativeFailCounter() >= largeFailCountThreshold())
-                || forcedOSRExitCounter() >=
-                       Options::forcedOSRExitCountForReoptimization;
-        }
+    void dontJITAnytimeSoon()
+    {
+        m_llintExecuteCounter.deferIndefinitely();
+    }
 
-        bool shouldReoptimizeFromLoopNow()
-        {
-            return (Options::desiredSpeculativeSuccessFailRatio *
-                        speculativeFailCounter() >= speculativeSuccessCounter()
-                    && speculativeFailCounter() >= largeFailCountThresholdForLoop())
-                || forcedOSRExitCounter() >=
-                       Options::forcedOSRExitCountForReoptimization;
-        }
-#endif
+    void jitAfterWarmUp()
+    {
+        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
+    }
 
-#if ENABLE(VALUE_PROFILER)
-        bool shouldOptimizeNow();
-#else
-        bool shouldOptimizeNow() { return false; }
-#endif
-        
+    void jitSoon()
+    {
+        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
+    }
+
+    const BaselineExecutionCounter& llintExecuteCounter() const
+    {
+        return m_llintExecuteCounter;
+    }
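+
+    // A minimal sketch of how a caller might consult these thresholds
+    // (hypothetical; the real LLInt slow paths differ in detail):
+    //
+    //     if (codeBlock->checkIfJITThresholdReached())
+    //         compileWithBaselineJIT(codeBlock); // hypothetical helper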
+
+    // Functions for controlling when tiered compilation kicks in. This
+    // controls both when the optimizing compiler is invoked and when OSR
+    // entry happens. Two triggers exist: the loop trigger and the return
+    // trigger. In either case, when an addition to m_jitExecuteCounter
+    // causes it to become non-negative, the optimizing compiler is
+    // invoked. This includes a fast check to see if this CodeBlock has
+    // already been optimized (i.e. replacement() returns a CodeBlock
+    // that was optimized with a higher tier JIT than this one). In the
+    // case of the loop trigger, if the optimized compilation succeeds
+    // (or has already succeeded in the past) then OSR is attempted to
+    // redirect program flow into the optimized code.
+
+    // These functions are called from within the optimization triggers,
+    // and are used as a single point at which we define the heuristics
+    // for how much warm-up is mandated before the next optimization
+    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
+    // as this is called from the CodeBlock constructor.
+
+    // When we observe a lot of speculation failures, we trigger a
+    // reoptimization. But each time, we increase the optimization trigger
+    // to avoid thrashing.
+    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
+    void countReoptimization();
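+
+    // A minimal sketch of the trigger in action (hypothetical caller;
+    // tryCompileWithDFG is an assumed helper, not part of this class):
+    //
+    //     if (codeBlock->checkIfOptimizationThresholdReached()) {
+    //         CompilationResult result = tryCompileWithDFG(codeBlock);
+    //         codeBlock->setOptimizationThresholdBasedOnCompilationResult(result);
+    //     }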
 #if ENABLE(JIT)
-        void reoptimize()
-        {
-            ASSERT(replacement() != this);
-            ASSERT(replacement()->alternative() == this);
-            replacement()->tallyFrequentExitSites();
-            replacement()->jettison();
-            countReoptimization();
-            optimizeAfterWarmUp();
-        }
-#endif
+    unsigned numberOfDFGCompiles();
 
-#if ENABLE(VERBOSE_VALUE_PROFILE)
-        void dumpValueProfiles();
-#endif
-        
-        // FIXME: Make these remaining members private.
+    int32_t codeTypeThresholdMultiplier() const;
 
-        int m_numCalleeRegisters;
-        int m_numVars;
-        int m_numCapturedVars;
-        bool m_isConstructor;
+    int32_t adjustedCounterValue(int32_t desiredThreshold);
 
-    protected:
-#if ENABLE(JIT)
-        virtual bool jitCompileImpl(JSGlobalData&) = 0;
-#endif
-        virtual void visitWeakReferences(SlotVisitor&);
-        virtual void finalizeUnconditionally();
-        
-    private:
-        friend class DFGCodeBlocks;
-        
-#if ENABLE(DFG_JIT)
-        void tallyFrequentExitSites();
-#else
-        void tallyFrequentExitSites() { }
-#endif
-        
-        void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;
-
-        CString registerName(ExecState*, int r) const;
-        void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
-        void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
-        void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op) const;
-        void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
-        void printCallOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
-        void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
-        void visitStructures(SlotVisitor&, Instruction* vPC) const;
-        
-#if ENABLE(DFG_JIT)
-        bool shouldImmediatelyAssumeLivenessDuringScan()
-        {
-            // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
-            // CodeBlocks don't need to be jettisoned when their weak references go
-            // stale. So if a basline JIT CodeBlock gets scanned, we can assume that
-            // this means that it's live.
-            if (!m_dfgData)
-                return true;
-            
-            // For simplicity, we don't attempt to jettison code blocks during GC if
-            // they are executing. Instead we strongly mark their weak references to
-            // allow them to continue to execute soundly.
-            if (m_dfgData->mayBeExecuting)
-                return true;
+    int32_t* addressOfJITExecuteCounter()
+    {
+        return &m_jitExecuteCounter.m_counter;
+    }
 
-            return false;
-        }
-#else
-        bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
+    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
+    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
+    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
+
+    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+
+    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
+
+    // Check if the optimization threshold has been reached, and if not,
+    // adjust the heuristics accordingly. Returns true if the threshold has
+    // been reached.
+    bool checkIfOptimizationThresholdReached();
+
+    // Call this to force the next optimization trigger to fire. This is
+    // rarely wise, since optimization triggers are typically more
+    // expensive than executing baseline code.
+    void optimizeNextInvocation();
+
+    // Call this to prevent optimization from happening again. Note that
+    // optimization will still happen after roughly 2^29 invocations,
+    // so this is really meant to delay that as much as possible. This
+    // is called if optimization failed, and we expect it to fail in
+    // the future as well.
+    void dontOptimizeAnytimeSoon();
+
+    // Call this to reinitialize the counter to its starting state,
+    // forcing a warm-up to happen before the next optimization trigger
+    // fires. This is called in the CodeBlock constructor. It also
+    // makes sense to call this if an OSR exit occurred. Note that
+    // OSR exit code is generated code, so the value of the execute
+    // counter that this corresponds to is also available directly.
+    void optimizeAfterWarmUp();
+
+    // Call this to force an optimization trigger to fire only after
+    // a lot of warm-up.
+    void optimizeAfterLongWarmUp();
+
+    // Call this to cause an optimization trigger to fire soon, but
+    // not necessarily the next one. This makes sense if optimization
+    // succeeds. Successful optimization means that all calls are
+    // relinked to the optimized code, so this only affects call
+    // frames that are still executing this CodeBlock. The value here
+    // is tuned to strike a balance between the cost of OSR entry
+    // (which is too high to warrant making every loop back edge
+    // trigger OSR immediately) and the cost of executing baseline
+    // code (which is high enough that we don't necessarily want to
+    // have a full warm-up). The intuition for calling this instead of
+    // optimizeNextInvocation() is for the case of recursive functions
+    // with loops. Consider that there may be N call frames of some
+    // recursive function, for a reasonably large value of N. The top
+    // one triggers optimization, and then returns, and then all of
+    // the others return. We don't want optimization to be triggered on
+    // each return, as that would be superfluous. It only makes sense
+    // to trigger optimization if one of those functions becomes hot
+    // in the baseline code.
+    void optimizeSoon();
+
+    void forceOptimizationSlowPathConcurrently();
+
+    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
+    
+    uint32_t osrExitCounter() const { return m_osrExitCounter; }
+
+    void countOSRExit() { m_osrExitCounter++; }
+
+    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
+
+    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
+
+    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
+    uint32_t exitCountThresholdForReoptimization();
+    uint32_t exitCountThresholdForReoptimizationFromLoop();
+    bool shouldReoptimizeNow();
+    bool shouldReoptimizeFromLoopNow();
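+
+    // A minimal sketch (hypothetical): an OSR exit handler typically counts
+    // the exit and asks whether reoptimization is warranted, e.g.
+    //
+    //     codeBlock->countOSRExit();
+    //     if (codeBlock->shouldReoptimizeNow())
+    //         reoptimizeAfterSpeculationFailure(codeBlock); // hypothetical helper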
+#else // No JIT
+    void optimizeAfterWarmUp() { }
+    unsigned numberOfDFGCompiles() { return 0; }
 #endif
-        
-        void performTracingFixpointIteration(SlotVisitor&);
-        
-        void stronglyVisitStrongReferences(SlotVisitor&);
-        void stronglyVisitWeakReferences(SlotVisitor&);
 
-        void createRareDataIfNecessary()
-        {
-            if (!m_rareData)
-                m_rareData = adoptPtr(new RareData);
-        }
-        
-        int m_numParameters;
+    bool shouldOptimizeNow();
+    void updateAllValueProfilePredictions();
+    void updateAllArrayPredictions();
+    void updateAllPredictions();
+
+    unsigned frameRegisterCount();
+    int stackPointerOffset();
+
+    bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
+
+    bool hasDebuggerRequests() const { return m_debuggerRequests; }
+    void* debuggerRequestsAddress() { return &m_debuggerRequests; }
 
-        WriteBarrier<ScriptExecutable> m_ownerExecutable;
-        JSGlobalData* m_globalData;
+    void addBreakpoint(unsigned numBreakpoints);
+    void removeBreakpoint(unsigned numBreakpoints)
+    {
+        ASSERT(m_numBreakpoints >= numBreakpoints);
+        m_numBreakpoints -= numBreakpoints;
+    }
+
+    enum SteppingMode {
+        SteppingModeDisabled,
+        SteppingModeEnabled
+    };
+    void setSteppingMode(SteppingMode);
 
-        RefCountedArray<Instruction> m_instructions;
+    void clearDebuggerRequests()
+    {
+        m_steppingMode = SteppingModeDisabled;
+        m_numBreakpoints = 0;
+    }
+    
+    // FIXME: Make these remaining members private.
 
-        int m_thisRegister;
-        int m_argumentsRegister;
-        int m_activationRegister;
+    int m_numCalleeRegisters;
+    int m_numVars;
+    bool m_isConstructor : 1;
+    
+    // This is intentionally public; it's the responsibility of anyone doing any
+    // of the following to hold the lock:
+    //
+    // - Modifying any inline cache in this code block.
+    //
+    // - Querying any inline cache in this code block, from a thread other than
+    //   the main thread.
+    //
+    // Additionally, it's only legal to modify the inline cache on the main
+    // thread. This means that the main thread can query the inline cache without
+    // locking. This is crucial since executing the inline cache is effectively
+    // "querying" it.
+    //
+    // Another exception to the rules is that the GC can do whatever it wants
+    // without holding any locks, because the GC is guaranteed to wait until any
+    // concurrent compilation threads finish what they're doing.
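+    //
+    // A minimal sketch of a concurrent-thread query under these rules
+    // (hypothetical):
+    //
+    //     ConcurrentJITLocker locker(codeBlock->m_lock);
+    //     // ... read inline cache or profiling state while the lock is held ...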
+    mutable ConcurrentJITLock m_lock;
+    
+    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
+    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
+    
+    bool m_didFailFTLCompilation : 1;
+    bool m_hasBeenCompiledWithFTL : 1;
 
-        bool m_needsFullScopeChain;
-        bool m_usesEval;
-        bool m_isNumericCompareFunction;
-        bool m_isStrictMode;
+    // Internal methods for use by validation code. They would be private if it
+    // weren't for the fact that we use them from anonymous namespaces.
+    void beginValidationDidFail();
+    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
 
-        CodeType m_codeType;
+    bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live.
 
-        RefPtr<SourceProvider> m_source;
-        unsigned m_sourceOffset;
+protected:
+    virtual void visitWeakReferences(SlotVisitor&) override;
+    virtual void finalizeUnconditionally() override;
 
-        Vector<unsigned> m_propertyAccessInstructions;
-        Vector<unsigned> m_globalResolveInstructions;
-#if ENABLE(LLINT)
-        SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
-        SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
-#endif
-#if ENABLE(JIT)
-        Vector<StructureStubInfo> m_structureStubInfos;
-        Vector<GlobalResolveInfo> m_globalResolveInfos;
-        Vector<CallLinkInfo> m_callLinkInfos;
-        Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
-        JITCode m_jitCode;
-        MacroAssemblerCodePtr m_jitCodeWithArityCheck;
-        SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
-#endif
-#if ENABLE(DFG_JIT) || ENABLE(LLINT)
-        OwnPtr<CompactJITCodeMap> m_jitCodeMap;
-#endif
 #if ENABLE(DFG_JIT)
-        struct WeakReferenceTransition {
-            WeakReferenceTransition() { }
-            
-            WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
-                : m_from(globalData, owner, from)
-                , m_to(globalData, owner, to)
-            {
-                if (!!codeOrigin)
-                    m_codeOrigin.set(globalData, owner, codeOrigin);
-            }
-
-            WriteBarrier<JSCell> m_codeOrigin;
-            WriteBarrier<JSCell> m_from;
-            WriteBarrier<JSCell> m_to;
-        };
-        
-        struct DFGData {
-            DFGData()
-                : mayBeExecuting(false)
-                , isJettisoned(false)
-            {
-            }
-            
-            Vector<DFG::OSREntryData> osrEntry;
-            SegmentedVector<DFG::OSRExit, 8> osrExit;
-            Vector<DFG::SpeculationRecovery> speculationRecovery;
-            Vector<WeakReferenceTransition> transitions;
-            Vector<WriteBarrier<JSCell> > weakReferences;
-            bool mayBeExecuting;
-            bool isJettisoned;
-            bool livenessHasBeenProved; // Initialized and used on every GC.
-            bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
-            unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
-        };
-        
-        OwnPtr<DFGData> m_dfgData;
-        
-        // This is relevant to non-DFG code blocks that serve as the profiled code block
-        // for DFG code blocks.
-        DFG::ExitProfile m_exitProfile;
-        CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
-#endif
-#if ENABLE(VALUE_PROFILER)
-        Vector<ValueProfile> m_argumentValueProfiles;
-        SegmentedVector<ValueProfile, 8> m_valueProfiles;
-        SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
-        SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
-        unsigned m_executionEntryCount;
+    void tallyFrequentExitSites();
+#else
+    void tallyFrequentExitSites() { }
 #endif
 
-        Vector<unsigned> m_jumpTargets;
-        Vector<unsigned> m_loopTargets;
+private:
+    friend class CodeBlockSet;
+    
+    CodeBlock* specialOSREntryBlockOrNull();
+    
+    void noticeIncomingCall(ExecState* callerFrame);
+    
+    double optimizationThresholdScalingFactor();
 
-        // Constant Pool
-        Vector<Identifier> m_identifiers;
-        COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
-        Vector<WriteBarrier<Unknown> > m_constantRegisters;
-        Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
-        Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
+#if ENABLE(JIT)
+    ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
+#endif
+        
+    void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
 
-        SymbolTable* m_symbolTable;
+    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
+    {
+        size_t count = constants.size();
+        m_constantRegisters.resize(count);
+        for (size_t i = 0; i < count; i++)
+            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
+    }
 
-        OwnPtr<CodeBlock> m_alternative;
-        
-        ExecutionCounter m_llintExecuteCounter;
-        
-        ExecutionCounter m_jitExecuteCounter;
-        int32_t m_totalJITExecutions;
-        uint32_t m_speculativeSuccessCounter;
-        uint32_t m_speculativeFailCounter;
-        uint32_t m_forcedOSRExitCounter;
-        uint16_t m_optimizationDelayCounter;
-        uint16_t m_reoptimizationRetryCounter;
+    void dumpBytecode(
+        PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
+        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+
+    CString registerName(int r) const;
+    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
+    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
+    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
+    enum CacheDumpMode { DumpCaches, DontDumpCaches };
+    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
+    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
+
+    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
+    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
+    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
+    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
+        
+    bool shouldImmediatelyAssumeLivenessDuringScan();
+    
+    void propagateTransitions(SlotVisitor&);
+    void determineLiveness(SlotVisitor&);
         
-        struct RareData {
-           WTF_MAKE_FAST_ALLOCATED;
-        public:
-            Vector<HandlerInfo> m_exceptionHandlers;
-
-            // Rare Constants
-            Vector<WriteBarrier<RegExp> > m_regexps;
-
-            // Buffers used for large array literals
-            Vector<Vector<JSValue> > m_constantBuffers;
-            
-            // Jump Tables
-            Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
-            Vector<SimpleJumpTable> m_characterSwitchJumpTables;
-            Vector<StringJumpTable> m_stringSwitchJumpTables;
-
-            EvalCodeCache m_evalCodeCache;
-
-            // Expression info - present if debugging.
-            Vector<ExpressionRangeInfo> m_expressionInfo;
-            // Line info - present if profiling or debugging.
-            Vector<LineInfo> m_lineInfo;
+    void stronglyVisitStrongReferences(SlotVisitor&);
+    void stronglyVisitWeakReferences(SlotVisitor&);
+
+    void createRareDataIfNecessary()
+    {
+        if (!m_rareData)
+            m_rareData = adoptPtr(new RareData);
+    }
+    
 #if ENABLE(JIT)
-            Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
-#endif
-#if ENABLE(DFG_JIT)
-            SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
-            Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
+    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
+    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
 #endif
+    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
+    int m_numParameters;
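+    // The union below lets m_debuggerRequests alias the three bitfields, so a
+    // single word-sized load (see hasDebuggerRequests() and
+    // debuggerRequestsAddress()) tells the JIT whether any debugger work
+    // (breakpoints, stepping, or a debugger statement) is requested.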
+    union {
+        unsigned m_debuggerRequests;
+        struct {
+            unsigned m_hasDebuggerStatement : 1;
+            unsigned m_steppingMode : 1;
+            unsigned m_numBreakpoints : 30;
         };
-#if COMPILER(MSVC)
-        friend void WTF::deleteOwnedPtr<RareData>(RareData*);
-#endif
-        OwnPtr<RareData> m_rareData;
+    };
+    WriteBarrier<ScriptExecutable> m_ownerExecutable;
+    VM* m_vm;
+
+    RefCountedArray<Instruction> m_instructions;
+    WriteBarrier<SymbolTable> m_symbolTable;
+    VirtualRegister m_thisRegister;
+    VirtualRegister m_argumentsRegister;
+    VirtualRegister m_activationRegister;
+
+    bool m_isStrictMode;
+    bool m_needsActivation;
+    bool m_mayBeExecuting;
+    uint8_t m_visitAggregateHasBeenCalled;
+
+    RefPtr<SourceProvider> m_source;
+    unsigned m_sourceOffset;
+    unsigned m_firstLineColumnOffset;
+    unsigned m_codeType;
+
+    Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
+    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
+    RefPtr<JITCode> m_jitCode;
 #if ENABLE(JIT)
-        CompileWithDFGState m_canCompileWithDFGState;
+    Bag<StructureStubInfo> m_stubInfos;
+    Vector<ByValInfo> m_byValInfos;
+    Bag<CallLinkInfo> m_callLinkInfos;
+    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
 #endif
-    };
-
-    // Program code is not marked by any function, so we make the global object
-    // responsible for marking it.
+    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+#if ENABLE(DFG_JIT)
+    // This is relevant to non-DFG code blocks that serve as the profiled code block
+    // for DFG code blocks.
+    DFG::ExitProfile m_exitProfile;
+    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
+#endif
+    Vector<ValueProfile> m_argumentValueProfiles;
+    Vector<ValueProfile> m_valueProfiles;
+    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
+    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
+    Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
+    ArrayProfileVector m_arrayProfiles;
+    Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
+
+    // Constant Pool
+    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
+    // TODO: This could just be a pointer to m_unlinkedCode's data, but the DFG mutates
+    // it, so we're stuck with it for now.
+    Vector<WriteBarrier<Unknown>> m_constantRegisters;
+    Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
+    Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;
+
+    RefPtr<CodeBlock> m_alternative;
+    
+    BaselineExecutionCounter m_llintExecuteCounter;
 
-    class GlobalCodeBlock : public CodeBlock {
-    protected:
-        GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
-            : CodeBlock(CopyParsedBlock, other, &m_unsharedSymbolTable)
-            , m_unsharedSymbolTable(other.m_unsharedSymbolTable)
-        {
-        }
-        
-        GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
-            : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative)
-        {
-        }
+    BaselineExecutionCounter m_jitExecuteCounter;
+    int32_t m_totalJITExecutions;
+    uint32_t m_osrExitCounter;
+    uint16_t m_optimizationDelayCounter;
+    uint16_t m_reoptimizationRetryCounter;
+    
+    mutable CodeBlockHash m_hash;
 
-    private:
-        SymbolTable m_unsharedSymbolTable;
-    };
+    std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
 
-    class ProgramCodeBlock : public GlobalCodeBlock {
+    struct RareData {
+        WTF_MAKE_FAST_ALLOCATED;
     public:
-        ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
-            : GlobalCodeBlock(CopyParsedBlock, other)
-        {
-        }
+        Vector<HandlerInfo> m_exceptionHandlers;
 
-        ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
-            : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
-        {
-        }
-        
+        // Buffers used for large array literals
+        Vector<Vector<JSValue>> m_constantBuffers;
+
+        // Jump Tables
+        Vector<SimpleJumpTable> m_switchJumpTables;
+        Vector<StringJumpTable> m_stringSwitchJumpTables;
+
+        EvalCodeCache m_evalCodeCache;
+    };
+#if COMPILER(MSVC)
+    friend void WTF::deleteOwnedPtr<RareData>(RareData*);
+#endif
+    OwnPtr<RareData> m_rareData;
 #if ENABLE(JIT)
-    protected:
-        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
-        virtual void jettison();
-        virtual bool jitCompileImpl(JSGlobalData&);
-        virtual CodeBlock* replacement();
-        virtual bool canCompileWithDFGInternal();
+    DFG::CapabilityLevel m_capabilityLevelState;
 #endif
-    };
-
-    class EvalCodeBlock : public GlobalCodeBlock {
-    public:
-        EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
-            : GlobalCodeBlock(CopyParsedBlock, other)
-            , m_baseScopeDepth(other.m_baseScopeDepth)
-            , m_variables(other.m_variables)
-        {
-        }
-        
-        EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
-            : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
-            , m_baseScopeDepth(baseScopeDepth)
-        {
-        }
+};
 
-        int baseScopeDepth() const { return m_baseScopeDepth; }
+// Program code is not marked by any function, so we make the global object
+// responsible for marking it.
 
-        const Identifier& variable(unsigned index) { return m_variables[index]; }
-        unsigned numVariables() { return m_variables.size(); }
-        void adoptVariables(Vector<Identifier>& variables)
-        {
-            ASSERT(m_variables.isEmpty());
-            m_variables.swap(variables);
-        }
+class GlobalCodeBlock : public CodeBlock {
+protected:
+    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
+        : CodeBlock(CopyParsedBlock, other)
+    {
+    }
         
-#if ENABLE(JIT)
-    protected:
-        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
-        virtual void jettison();
-        virtual bool jitCompileImpl(JSGlobalData&);
-        virtual CodeBlock* replacement();
-        virtual bool canCompileWithDFGInternal();
-#endif
+    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
+    {
+    }
+};
 
-    private:
-        int m_baseScopeDepth;
-        Vector<Identifier> m_variables;
-    };
+class ProgramCodeBlock : public GlobalCodeBlock {
+public:
+    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
+        : GlobalCodeBlock(CopyParsedBlock, other)
+    {
+    }
 
-    class FunctionCodeBlock : public CodeBlock {
-    public:
-        FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
-            : CodeBlock(CopyParsedBlock, other, other.sharedSymbolTable())
-        {
-            // The fact that we have to do this is yucky, but is necessary because of the
-            // class hierarchy issues described in the comment block for the main
-            // constructor, below.
-            sharedSymbolTable()->ref();
-        }
+    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
+    {
+    }
 
-        // Rather than using the usual RefCounted::create idiom for SharedSymbolTable we just use new
-        // as we need to initialise the CodeBlock before we could initialise any RefPtr to hold the shared
-        // symbol table, so we just pass as a raw pointer with a ref count of 1.  We then manually deref
-        // in the destructor.
-        FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr)
-            : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative)
-        {
-        }
-        ~FunctionCodeBlock()
-        {
-            sharedSymbolTable()->deref();
-        }
-        
 #if ENABLE(JIT)
-    protected:
-        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
-        virtual void jettison();
-        virtual bool jitCompileImpl(JSGlobalData&);
-        virtual CodeBlock* replacement();
-        virtual bool canCompileWithDFGInternal();
+protected:
+    virtual CodeBlock* replacement() override;
+    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
 #endif
-    };
+};
 
-    inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+class EvalCodeBlock : public GlobalCodeBlock {
+public:
+    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
+        : GlobalCodeBlock(CopyParsedBlock, other)
     {
-        ASSERT(inlineCallFrame);
-        ExecutableBase* executable = inlineCallFrame->executable.get();
-        ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
-        return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
     }
-    
-    inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+        
+    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
+        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
     {
-        if (codeOrigin.inlineCallFrame)
-            return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
-        return baselineCodeBlock;
     }
     
-
-    inline Register& ExecState::r(int index)
-    {
-        CodeBlock* codeBlock = this->codeBlock();
-        if (codeBlock->isConstantRegisterIndex(index))
-            return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
-        return this[index];
-    }
-
-    inline Register& ExecState::uncheckedR(int index)
+    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
+    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
+    
+#if ENABLE(JIT)
+protected:
+    virtual CodeBlock* replacement() override;
+    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
+#endif
+    
+private:
+    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
+};
+
+class FunctionCodeBlock : public CodeBlock {
+public:
+    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
+        : CodeBlock(CopyParsedBlock, other)
     {
-        ASSERT(index < FirstConstantRegisterIndex);
-        return this[index];
     }
 
-#if ENABLE(DFG_JIT)
-    inline bool ExecState::isInlineCallFrame()
+    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
     {
-        if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
-            return false;
-        return isInlineCallFrameSlow();
     }
+    
+#if ENABLE(JIT)
+protected:
+    virtual CodeBlock* replacement() override;
+    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
 #endif
+};
+
+inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+{
+    RELEASE_ASSERT(inlineCallFrame);
+    ExecutableBase* executable = inlineCallFrame->executable.get();
+    RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
+    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
+}
+
+inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+{
+    if (codeOrigin.inlineCallFrame)
+        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
+    return baselineCodeBlock;
+}
+
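+// Maps an argument number to the register that actually holds its value: if
+// the argument has been captured, the symbol table's SlowArgument entry
+// records the register it was moved to; otherwise the usual argument offset
+// applies.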
+inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
+{
+    if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
+        return CallFrame::argumentOffset(argument);
+    
+    const SlowArgument* slowArguments = symbolTable()->slowArguments();
+    if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
+        return CallFrame::argumentOffset(argument);
+    
+    ASSERT(slowArguments[argument].status == SlowArgument::Captured);
+    return slowArguments[argument].index;
+}
+
+inline bool CodeBlock::hasSlowArguments()
+{
+    return !!symbolTable()->slowArguments();
+}
+
+inline Register& ExecState::r(int index)
+{
+    CodeBlock* codeBlock = this->codeBlock();
+    if (codeBlock->isConstantRegisterIndex(index))
+        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
+    return this[index];
+}
+
+inline Register& ExecState::uncheckedR(int index)
+{
+    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
+    return this[index];
+}
+
+inline JSValue ExecState::argumentAfterCapture(size_t argument)
+{
+    if (argument >= argumentCount())
+        return jsUndefined();
+    
+    if (!codeBlock())
+        return this[argumentOffset(argument)].jsValue();
+    
+    return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
+}
 
-#if ENABLE(DFG_JIT)
-    inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
-    {
-        // We have to check for 0 and -1 because those are used by the HashMap as markers.
-        uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
-        
-        // This checks for both of those nasty cases in one go.
-        // 0 + 1 = 1
-        // -1 + 1 = 0
-        if (value + 1 <= 1)
-            return;
+inline void CodeBlockSet::mark(void* candidateCodeBlock)
+{
+    // We have to check for 0 and -1 because those are used by the HashMap as markers.
+    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
+    
+    // This checks for both of those nasty cases in one go.
+    // 0 + 1 = 1
+    // -1 + 1 = 0
+    if (value + 1 <= 1)
+        return;
+
+    CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock); 
+    if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
+        return;
+
+    mark(codeBlock);
+}
+
+inline void CodeBlockSet::mark(CodeBlock* codeBlock)
+{
+    if (!codeBlock)
+        return;
+    
+    if (codeBlock->m_mayBeExecuting)
+        return;
+    
+    codeBlock->m_mayBeExecuting = true;
+    // We might not have cleared the marks for this CodeBlock, but we need to visit it.
+    codeBlock->m_visitAggregateHasBeenCalled = false;
+#if ENABLE(GGC)
+    m_currentlyExecuting.append(codeBlock);
+#endif
+}
+
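+// Applies the functor to each CodeBlock of this executable (the call and
+// construct variants for functions), delegating to forEachRelatedCodeBlock so
+// that related blocks are visited too.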
+template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
+{
+    switch (type()) {
+    case ProgramExecutableType: {
+        if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
+            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+        break;
+    }
         
-        HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
-        if (iter == m_set.end())
-            return;
+    case EvalExecutableType: {
+        if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
+            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
+        break;
+    }
         
-        (*iter)->m_dfgData->mayBeExecuting = true;
+    case FunctionExecutableType: {
+        Functor f(std::forward<Functor>(functor));
+        FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
+        if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
+            codeBlock->forEachRelatedCodeBlock(f);
+        if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
+            codeBlock->forEachRelatedCodeBlock(f);
+        break;
     }
-#endif
-    
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+}
+
 } // namespace JSC
 
 #endif // CodeBlock_h