diff --git a/dfg/DFGByteCodeParser.cpp b/dfg/DFGByteCodeParser.cpp
index c3e041c26c30cb3c6c189ce8dc32057ceefce575..2e37e697cf8c691b69698cbd5bc760556c40fdff 100644
--- a/dfg/DFGByteCodeParser.cpp
+++ b/dfg/DFGByteCodeParser.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #if ENABLE(DFG_JIT)
 
 #include "ArrayConstructor.h"
+#include "BasicBlockLocation.h"
 #include "CallLinkStatus.h"
 #include "CodeBlock.h"
 #include "CodeBlockWithJITType.h"
 #include "DFGArrayMode.h"
 #include "DFGCapabilities.h"
+#include "DFGGraph.h"
+#include "DFGJITCode.h"
 #include "GetByIdStatus.h"
-#include "Operations.h"
+#include "Heap.h"
+#include "JSLexicalEnvironment.h"
+#include "JSCInlines.h"
 #include "PreciseJumpTargets.h"
 #include "PutByIdStatus.h"
-#include "ResolveGlobalStatus.h"
+#include "StackAlignment.h"
 #include "StringConstructor.h"
 #include <wtf/CommaPrinter.h>
 #include <wtf/HashMap.h>
 #include <wtf/MathExtras.h>
+#include <wtf/StdLibExtras.h>
 
 namespace JSC { namespace DFG {
 
+static const bool verbose = false;
+
 class ConstantBufferKey {
 public:
     ConstantBufferKey()
@@ -128,26 +136,20 @@ public:
         , m_graph(graph)
         , m_currentBlock(0)
         , m_currentIndex(0)
-        , m_currentProfilingIndex(0)
-        , m_constantUndefined(UINT_MAX)
-        , m_constantNull(UINT_MAX)
-        , m_constantNaN(UINT_MAX)
-        , m_constant1(UINT_MAX)
-        , m_constants(m_codeBlock->numberOfConstantRegisters())
+        , m_constantUndefined(graph.freeze(jsUndefined()))
+        , m_constantNull(graph.freeze(jsNull()))
+        , m_constantNaN(graph.freeze(jsNumber(PNaN)))
+        , m_constantOne(graph.freeze(jsNumber(1)))
         , m_numArguments(m_codeBlock->numParameters())
         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
-        , m_preservedVars(m_codeBlock->m_numVars)
         , m_parameterSlots(0)
         , m_numPassedVarArgs(0)
         , m_inlineStackTop(0)
         , m_haveBuiltOperandMaps(false)
-        , m_emptyJSValueIndex(UINT_MAX)
         , m_currentInstruction(0)
+        , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
     {
         ASSERT(m_profiledBlock);
-        
-        for (int i = 0; i < m_codeBlock->m_numVars; ++i)
-            m_preservedVars.set(i);
     }
     
     // Parse a full CodeBlock of bytecode.
@@ -158,127 +160,203 @@ private:
 
     // Just parse from m_currentIndex to the end of the current CodeBlock.
     void parseCodeBlock();
+    
+    void ensureLocals(unsigned newNumLocals)
+    {
+        if (newNumLocals <= m_numLocals)
+            return;
+        m_numLocals = newNumLocals;
+        for (size_t i = 0; i < m_graph.numBlocks(); ++i)
+            m_graph.block(i)->ensureLocals(newNumLocals);
+    }
 
     // Helper for min and max.
-    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
+    template<typename ChecksFunctor>
+    bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
     
     // Handle calls. This resolves issues surrounding inlining and intrinsics.
-    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
-    void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
-    void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
+    void handleCall(
+        int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
+        Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
+        SpeculatedType prediction);
+    void handleCall(
+        int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
+        Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
+    void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
+    void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
+    void handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind);
+    void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
+    void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
+    unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
-    bool handleInlining(bool usesResult, Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
-    // Handle setting the result of an intrinsic.
-    void setIntrinsicResult(bool usesResult, int resultOperand, Node*);
+    bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
+    enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
+    template<typename ChecksFunctor>
+    bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
+    template<typename ChecksFunctor>
+    void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
+    void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
-    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
-    bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
-    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset);
-    void handleGetByOffset(
-        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
-        PropertyOffset);
+    template<typename ChecksFunctor>
+    bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
+    template<typename ChecksFunctor>
+    bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
+    template<typename ChecksFunctor>
+    bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks);
+    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
+    Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
     void handleGetById(
         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
         const GetByIdStatus&);
+    void emitPutById(
+        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
+    void handlePutById(
+        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
+        bool isDirect);
+    void emitChecks(const ConstantStructureCheckVector&);
 
-    Node* getScope(bool skipTop, unsigned skipCount);
-    
-    // Convert a set of ResolveOperations into graph nodes
-    bool parseResolveOperations(SpeculatedType, unsigned identifierNumber, ResolveOperations*, PutToBaseOperation*, Node** base, Node** value);
-
-    // Prepare to parse a block.
     void prepareToParseBlock();
+    void clearCaches();
+
     // Parse a single basic block of bytecode instructions.
     bool parseBlock(unsigned limit);
     // Link block successors.
-    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
-    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
+    void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
+    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
     
-    VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
+    VariableAccessData* newVariableAccessData(VirtualRegister operand)
     {
-        ASSERT(operand < FirstConstantRegisterIndex);
+        ASSERT(!operand.isConstant());
         
-        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
+        m_graph.m_variableAccessData.append(VariableAccessData(operand));
         return &m_graph.m_variableAccessData.last();
     }
     
     // Get/Set the operands/result of a bytecode instruction.
-    Node* getDirect(int operand)
+    Node* getDirect(VirtualRegister operand)
     {
-        // Is this a constant?
-        if (operand >= FirstConstantRegisterIndex) {
-            unsigned constant = operand - FirstConstantRegisterIndex;
-            ASSERT(constant < m_constants.size());
-            return getJSConstant(constant);
-        }
+        ASSERT(!operand.isConstant());
 
-        ASSERT(operand != JSStack::Callee);
-        
         // Is this an argument?
-        if (operandIsArgument(operand))
+        if (operand.isArgument())
             return getArgument(operand);
 
         // Must be a local.
-        return getLocal((unsigned)operand);
+        return getLocal(operand);
     }
-    Node* get(int operand)
+
+    Node* get(VirtualRegister operand)
     {
-        if (operand == JSStack::Callee) {
-            if (inlineCallFrame() && inlineCallFrame()->callee)
-                return cellConstant(inlineCallFrame()->callee.get());
-            
-            return getCallee();
+        if (operand.isConstant()) {
+            unsigned constantIndex = operand.toConstantIndex();
+            unsigned oldSize = m_constants.size();
+            if (constantIndex >= oldSize || !m_constants[constantIndex]) {
+                const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
+                JSValue value = codeBlock.getConstant(operand.offset());
+                SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
+                if (constantIndex >= oldSize) {
+                    m_constants.grow(constantIndex + 1);
+                    for (unsigned i = oldSize; i < m_constants.size(); ++i)
+                        m_constants[i] = nullptr;
+                }
+
+                Node* constantNode = nullptr;
+                if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
+                    constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
+                else
+                    constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
+                m_constants[constantIndex] = constantNode;
+            }
+            ASSERT(m_constants[constantIndex]);
+            return m_constants[constantIndex];
+        }
+        
+        if (inlineCallFrame()) {
+            if (!inlineCallFrame()->isClosureCall) {
+                JSFunction* callee = inlineCallFrame()->calleeConstant();
+                if (operand.offset() == JSStack::Callee)
+                    return weakJSConstant(callee);
+            }
+        } else if (operand.offset() == JSStack::Callee) {
+            // We have to do some constant-folding here because this enables CreateThis folding. Note
+            // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
+            // case if the function is a singleton then we already know it.
+            if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) {
+                InferredValue* singleton = executable->singletonFunction();
+                if (JSValue value = singleton->inferredValue()) {
+                    m_graph.watchpoints().addLazily(singleton);
+                    JSFunction* function = jsCast<JSFunction*>(value);
+                    return weakJSConstant(function);
+                }
+            }
+            return addToGraph(GetCallee);
         }
         
         return getDirect(m_inlineStackTop->remapOperand(operand));
     }
-    enum SetMode { NormalSet, SetOnEntry };
-    void setDirect(int operand, Node* value, SetMode setMode = NormalSet)
+    
+    enum SetMode {
+        // A normal set which follows a two-phase commit that spans code origins. During
+        // the current code origin it issues a MovHint, and at the start of the next
+        // code origin there will be a SetLocal. If the local needs flushing, the
+        // SetLocal in the second phase will be preceded by a Flush.
+        NormalSet,
+        
+        // A set where the SetLocal happens immediately and there is still a Flush. This
+        // is relevant when assigning to a local in situations that are tricky for the
+        // delayed SetLocal logic, but where we know that no side effects have been
+        // performed within this code origin. Under that condition it is a safe
+        // replacement for NormalSet.
+        ImmediateSetWithFlush,
+        
+        // A set where the SetLocal happens immediately and we do not Flush it even if
+        // this is a local that is marked as needing it. This is relevant when
+        // initializing locals at the top of a function.
+        ImmediateNakedSet
+    };
+    Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
     {
-        // Is this an argument?
-        if (operandIsArgument(operand)) {
-            setArgument(operand, value, setMode);
-            return;
-        }
+        addToGraph(MovHint, OpInfo(operand.offset()), value);
 
-        // Must be a local.
-        setLocal((unsigned)operand, value, setMode);
+        DelayedSetLocal delayed(currentCodeOrigin(), operand, value);
+        
+        if (setMode == NormalSet) {
+            m_setLocalQueue.append(delayed);
+            return 0;
+        }
+        
+        return delayed.execute(this, setMode);
     }
-    void set(int operand, Node* value, SetMode setMode = NormalSet)
+    
+    void processSetLocalQueue()
     {
-        setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
+        for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
+            m_setLocalQueue[i].execute(this);
+        m_setLocalQueue.resize(0);
     }
-    
-    void setPair(int operand1, Node* value1, int operand2, Node* value2)
+
+    Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
     {
-        // First emit dead SetLocals for the benefit of OSR.
-        set(operand1, value1);
-        set(operand2, value2);
-        
-        // Now emit the real SetLocals.
-        set(operand1, value1);
-        set(operand2, value2);
+        return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
     }
     
     Node* injectLazyOperandSpeculation(Node* node)
     {
         ASSERT(node->op() == GetLocal);
-        ASSERT(node->codeOrigin.bytecodeIndex == m_currentIndex);
-        SpeculatedType prediction = 
-            m_inlineStackTop->m_lazyOperands.prediction(
-                LazyOperandValueProfileKey(m_currentIndex, node->local()));
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLog("Lazy operand [@", node->index(), ", bc#", m_currentIndex, ", r", node->local(), "] prediction: ", SpeculationDump(prediction), "\n");
-#endif
+        ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
+        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+        LazyOperandValueProfileKey key(m_currentIndex, node->local());
+        SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
         node->variableAccessData()->predict(prediction);
         return node;
     }
 
     // Used in implementing get/set, above, where the operand is a local variable.
-    Node* getLocal(unsigned operand)
+    Node* getLocal(VirtualRegister operand)
     {
-        Node* node = m_currentBlock->variablesAtTail.local(operand);
-        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
+        unsigned local = operand.toLocal();
+
+        Node* node = m_currentBlock->variablesAtTail.local(local);
         
         // This has two goals: 1) link together variable access datas, and 2)
         // try to avoid creating redundant GetLocals. (1) is required for
@@ -290,58 +368,62 @@ private:
         
         if (node) {
             variable = node->variableAccessData();
-            variable->mergeIsCaptured(isCaptured);
             
-            if (!isCaptured) {
-                switch (node->op()) {
-                case GetLocal:
-                    return node;
-                case SetLocal:
-                    return node->child1().node();
-                default:
-                    break;
-                }
+            switch (node->op()) {
+            case GetLocal:
+                return node;
+            case SetLocal:
+                return node->child1().node();
+            default:
+                break;
             }
-        } else {
-            m_preservedVars.set(operand);
-            variable = newVariableAccessData(operand, isCaptured);
-        }
+        } else
+            variable = newVariableAccessData(operand);
         
         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
-        m_currentBlock->variablesAtTail.local(operand) = node;
+        m_currentBlock->variablesAtTail.local(local) = node;
         return node;
     }
-    void setLocal(unsigned operand, Node* value, SetMode setMode = NormalSet)
+
+    Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
     {
-        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
+        CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
+        m_currentSemanticOrigin = semanticOrigin;
+
+        unsigned local = operand.toLocal();
         
-        if (setMode == NormalSet) {
+        if (setMode != ImmediateNakedSet) {
             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
-            if (isCaptured || argumentPosition)
+            if (argumentPosition)
                 flushDirect(operand, argumentPosition);
+            else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister())
+                flush(operand);
         }
 
-        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
+        VariableAccessData* variableAccessData = newVariableAccessData(operand);
         variableAccessData->mergeStructureCheckHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
+        variableAccessData->mergeCheckArrayHoistingFailed(
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
-        m_currentBlock->variablesAtTail.local(operand) = node;
+        m_currentBlock->variablesAtTail.local(local) = node;
+
+        m_currentSemanticOrigin = oldSemanticOrigin;
+        return node;
     }
 
     // Used in implementing get/set, above, where the operand is an argument.
-    Node* getArgument(unsigned operand)
+    Node* getArgument(VirtualRegister operand)
     {
-        unsigned argument = operandToArgument(operand);
+        unsigned argument = operand.toArgument();
         ASSERT(argument < m_numArguments);
         
         Node* node = m_currentBlock->variablesAtTail.argument(argument);
-        bool isCaptured = m_codeBlock->isCaptured(operand);
 
         VariableAccessData* variable;
         
         if (node) {
             variable = node->variableAccessData();
-            variable->mergeIsCaptured(isCaptured);
             
             switch (node->op()) {
             case GetLocal:
@@ -352,33 +434,39 @@ private:
                 break;
             }
         } else
-            variable = newVariableAccessData(operand, isCaptured);
+            variable = newVariableAccessData(operand);
         
         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
         m_currentBlock->variablesAtTail.argument(argument) = node;
         return node;
     }
-    void setArgument(int operand, Node* value, SetMode setMode = NormalSet)
+    Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
     {
-        unsigned argument = operandToArgument(operand);
+        CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
+        m_currentSemanticOrigin = semanticOrigin;
+
+        unsigned argument = operand.toArgument();
         ASSERT(argument < m_numArguments);
         
-        bool isCaptured = m_codeBlock->isCaptured(operand);
-
-        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
+        VariableAccessData* variableAccessData = newVariableAccessData(operand);
 
         // Always flush arguments, except for 'this'. If 'this' is created by us,
         // then make sure that it's never unboxed.
         if (argument) {
-            if (setMode == NormalSet)
+            if (setMode != ImmediateNakedSet)
                 flushDirect(operand);
         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
             variableAccessData->mergeShouldNeverUnbox(true);
         
         variableAccessData->mergeStructureCheckHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
+        variableAccessData->mergeCheckArrayHoistingFailed(
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
         m_currentBlock->variablesAtTail.argument(argument) = node;
+
+        m_currentSemanticOrigin = oldSemanticOrigin;
+        return node;
     }
     
     ArgumentPosition* findArgumentPositionForArgument(int argument)
@@ -389,149 +477,111 @@ private:
         return stack->m_argumentPositions[argument];
     }
     
-    ArgumentPosition* findArgumentPositionForLocal(int operand)
+    ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
     {
         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
             if (!inlineCallFrame)
                 break;
-            if (operand >= static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize))
+            if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
                 continue;
-            if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
+            if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                 continue;
-            if (operand < static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
+            if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
                 continue;
-            int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
+            int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
             return stack->m_argumentPositions[argument];
         }
         return 0;
     }
     
-    ArgumentPosition* findArgumentPosition(int operand)
+    ArgumentPosition* findArgumentPosition(VirtualRegister operand)
     {
-        if (operandIsArgument(operand))
-            return findArgumentPositionForArgument(operandToArgument(operand));
+        if (operand.isArgument())
+            return findArgumentPositionForArgument(operand.toArgument());
         return findArgumentPositionForLocal(operand);
     }
-    
-    void flush(int operand)
+
+    void flush(VirtualRegister operand)
     {
         flushDirect(m_inlineStackTop->remapOperand(operand));
     }
     
-    void flushDirect(int operand)
+    void flushDirect(VirtualRegister operand)
     {
         flushDirect(operand, findArgumentPosition(operand));
     }
     
-    void flushDirect(int operand, ArgumentPosition* argumentPosition)
+    void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
     {
-        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
-        
-        ASSERT(operand < FirstConstantRegisterIndex);
-        
-        if (!operandIsArgument(operand))
-            m_preservedVars.set(operand);
+        ASSERT(!operand.isConstant());
         
         Node* node = m_currentBlock->variablesAtTail.operand(operand);
         
         VariableAccessData* variable;
         
-        if (node) {
+        if (node)
             variable = node->variableAccessData();
-            variable->mergeIsCaptured(isCaptured);
-        } else
-            variable = newVariableAccessData(operand, isCaptured);
+        else
+            variable = newVariableAccessData(operand);
         
         node = addToGraph(Flush, OpInfo(variable));
         m_currentBlock->variablesAtTail.operand(operand) = node;
         if (argumentPosition)
             argumentPosition->addVariable(variable);
     }
-
+    
     void flush(InlineStackEntry* inlineStackEntry)
     {
         int numArguments;
-        if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame)
+        if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
+            ASSERT(!m_hasDebuggerEnabled);
             numArguments = inlineCallFrame->arguments.size();
-        else
+            if (inlineCallFrame->isClosureCall)
+                flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
+            if (inlineCallFrame->isVarargs())
+                flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount)));
+        } else
             numArguments = inlineStackEntry->m_codeBlock->numParameters();
         for (unsigned argument = numArguments; argument-- > 1;)
-            flushDirect(inlineStackEntry->remapOperand(argumentToOperand(argument)));
-        for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
-            if (!inlineStackEntry->m_codeBlock->isCaptured(local))
-                continue;
-            flushDirect(inlineStackEntry->remapOperand(local));
-        }
+            flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
+        if (m_hasDebuggerEnabled)
+            flush(m_codeBlock->scopeRegister());
     }
 
-    void flushAllArgumentsAndCapturedVariablesInInlineStack()
+    void flushForTerminal()
     {
         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
             flush(inlineStackEntry);
     }
 
-    void flushArgumentsAndCapturedVariables()
+    void flushForReturn()
     {
         flush(m_inlineStackTop);
     }
-
-    // Get an operand, and perform a ToInt32/ToNumber conversion on it.
-    Node* getToInt32(int operand)
-    {
-        return toInt32(get(operand));
-    }
-
-    // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
-    Node* toInt32(Node* node)
-    {
-        if (node->hasInt32Result())
-            return node;
-
-        if (node->op() == UInt32ToNumber)
-            return node->child1().node();
-
-        // Check for numeric constants boxed as JSValues.
-        if (canFold(node)) {
-            JSValue v = valueOfJSConstant(node);
-            if (v.isInt32())
-                return getJSConstant(node->constantNumber());
-            if (v.isNumber())
-                return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
-        }
-
-        return addToGraph(ValueToInt32, node);
-    }
-
-    // NOTE: Only use this to construct constants that arise from non-speculative
-    // constant folding. I.e. creating constants using this if we had constant
-    // field inference would be a bad idea, since the bytecode parser's folding
-    // doesn't handle liveness preservation.
-    Node* getJSConstantForValue(JSValue constantValue)
+    
+    void flushIfTerminal(SwitchData& data)
     {
-        unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
-        if (constantIndex >= m_constants.size())
-            m_constants.append(ConstantRecord());
+        if (data.fallThrough.bytecodeIndex() > m_currentIndex)
+            return;
         
-        ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+        for (unsigned i = data.cases.size(); i--;) {
+            if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
+                return;
+        }
         
-        return getJSConstant(constantIndex);
+        flushForTerminal();
     }
 
-    Node* getJSConstant(unsigned constant)
+    // Assumes that the constant should be strongly marked.
+    Node* jsConstant(JSValue constantValue)
     {
-        Node* node = m_constants[constant].asJSValue;
-        if (node)
-            return node;
-
-        Node* result = addToGraph(JSConstant, OpInfo(constant));
-        m_constants[constant].asJSValue = result;
-        return result;
+        return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
     }
 
-    Node* getCallee()
+    Node* weakJSConstant(JSValue constantValue)
     {
-        return addToGraph(GetCallee);
+        return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
     }
 
     // Helper functions to get/set the this value.
@@ -539,273 +589,133 @@ private:
     {
         return get(m_inlineStackTop->m_codeBlock->thisRegister());
     }
+
     void setThis(Node* value)
     {
         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
     }
 
-    // Convenience methods for checking nodes for constants.
-    bool isJSConstant(Node* node)
-    {
-        return node->op() == JSConstant;
-    }
-    bool isInt32Constant(Node* node)
-    {
-        return isJSConstant(node) && valueOfJSConstant(node).isInt32();
-    }
-    // Convenience methods for getting constant values.
-    JSValue valueOfJSConstant(Node* node)
-    {
-        ASSERT(isJSConstant(node));
-        return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber());
-    }
-    int32_t valueOfInt32Constant(Node* node)
-    {
-        ASSERT(isInt32Constant(node));
-        return valueOfJSConstant(node).asInt32();
-    }
-    
-    // This method returns a JSConstant with the value 'undefined'.
-    Node* constantUndefined()
-    {
-        // Has m_constantUndefined been set up yet?
-        if (m_constantUndefined == UINT_MAX) {
-            // Search the constant pool for undefined, if we find it, we can just reuse this!
-            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
-            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
-                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
-                if (testMe.isUndefined())
-                    return getJSConstant(m_constantUndefined);
-            }
-
-            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
-            ASSERT(m_constants.size() == numberOfConstants);
-            m_codeBlock->addConstant(jsUndefined());
-            m_constants.append(ConstantRecord());
-            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
-        }
-
-        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
-        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
-        return getJSConstant(m_constantUndefined);
-    }
-
-    // This method returns a JSConstant with the value 'null'.
-    Node* constantNull()
+    InlineCallFrame* inlineCallFrame()
     {
-        // Has m_constantNull been set up yet?
-        if (m_constantNull == UINT_MAX) {
-            // Search the constant pool for null, if we find it, we can just reuse this!
-            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
-            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
-                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
-                if (testMe.isNull())
-                    return getJSConstant(m_constantNull);
-            }
-
-            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
-            ASSERT(m_constants.size() == numberOfConstants);
-            m_codeBlock->addConstant(jsNull());
-            m_constants.append(ConstantRecord());
-            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
-        }
-
-        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
-        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
-        return getJSConstant(m_constantNull);
+        return m_inlineStackTop->m_inlineCallFrame;
     }
 
-    // This method returns a DoubleConstant with the value 1.
-    Node* one()
+    CodeOrigin currentCodeOrigin()
     {
-        // Has m_constant1 been set up yet?
-        if (m_constant1 == UINT_MAX) {
-            // Search the constant pool for the value 1, if we find it, we can just reuse this!
-            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
-            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
-                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
-                if (testMe.isInt32() && testMe.asInt32() == 1)
-                    return getJSConstant(m_constant1);
-            }
-
-            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
-            ASSERT(m_constants.size() == numberOfConstants);
-            m_codeBlock->addConstant(jsNumber(1));
-            m_constants.append(ConstantRecord());
-            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
-        }
-
-        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
-        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
-        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
-        return getJSConstant(m_constant1);
+        return CodeOrigin(m_currentIndex, inlineCallFrame());
     }
-    
-    // This method returns a DoubleConstant with the value NaN.
-    Node* constantNaN()
-    {
-        JSValue nan = jsNaN();
-        
-        // Has m_constantNaN been set up yet?
-        if (m_constantNaN == UINT_MAX) {
-            // Search the constant pool for the value NaN, if we find it, we can just reuse this!
-            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
-            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
-                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
-                if (JSValue::encode(testMe) == JSValue::encode(nan))
-                    return getJSConstant(m_constantNaN);
-            }
-
-            // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
-            ASSERT(m_constants.size() == numberOfConstants);
-            m_codeBlock->addConstant(nan);
-            m_constants.append(ConstantRecord());
-            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
-        }
 
-        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
-        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
-        ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
-        return getJSConstant(m_constantNaN);
-    }
-    
-    Node* cellConstant(JSCell* cell)
+    NodeOrigin currentNodeOrigin()
     {
-        HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, 0);
-        if (result.isNewEntry)
-            result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
-        
-        return result.iterator->value;
+        // FIXME: We should set the forExit origin only on those nodes that can exit.
+        // https://bugs.webkit.org/show_bug.cgi?id=145204
+        if (m_currentSemanticOrigin.isSet())
+            return NodeOrigin(m_currentSemanticOrigin, currentCodeOrigin());
+        return NodeOrigin(currentCodeOrigin());
     }
     
-    InlineCallFrame* inlineCallFrame()
-    {
-        return m_inlineStackTop->m_inlineCallFrame;
-    }
-
-    CodeOrigin currentCodeOrigin()
+    BranchData* branchData(unsigned taken, unsigned notTaken)
     {
-        return CodeOrigin(m_currentIndex, inlineCallFrame(), m_currentProfilingIndex - m_currentIndex);
+        // We assume that branches originating from bytecode always have a fall-through. We
+        // use this assumption to avoid checking for the creation of terminal blocks.
+        ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
+        BranchData* data = m_graph.m_branchData.add();
+        *data = BranchData::withBytecodeIndices(taken, notTaken);
+        return data;
     }
     
-    bool canFold(Node* node)
+    Node* addToGraph(Node* node)
     {
-        return node->isStronglyProvedConstantIn(inlineCallFrame());
-    }
-
-    // Our codegen for constant strict equality performs a bitwise comparison,
-    // so we can only select values that have a consistent bitwise identity.
-    bool isConstantForCompareStrictEq(Node* node)
-    {
-        if (!node->isConstant())
-            return false;
-        JSValue value = valueOfJSConstant(node);
-        return value.isBoolean() || value.isUndefinedOrNull();
+        if (Options::verboseDFGByteCodeParsing())
+            dataLog("        appended ", node, " ", Graph::opName(node->op()), "\n");
+        m_currentBlock->append(node);
+        return node;
     }
     
     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
     {
         Node* result = m_graph.addNode(
-            SpecNone, op, currentCodeOrigin(), Edge(child1), Edge(child2), Edge(child3));
-        ASSERT(op != Phi);
-        m_currentBlock->append(result);
-        return result;
+            SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2),
+            Edge(child3));
+        return addToGraph(result);
     }
     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
     {
         Node* result = m_graph.addNode(
-            SpecNone, op, currentCodeOrigin(), child1, child2, child3);
-        ASSERT(op != Phi);
-        m_currentBlock->append(result);
-        return result;
+            SpecNone, op, currentNodeOrigin(), child1, child2, child3);
+        return addToGraph(result);
     }
     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
     {
         Node* result = m_graph.addNode(
-            SpecNone, op, currentCodeOrigin(), info, Edge(child1), Edge(child2), Edge(child3));
-        ASSERT(op != Phi);
-        m_currentBlock->append(result);
-        return result;
+            SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
+            Edge(child3));
+        return addToGraph(result);
     }
     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
     {
         Node* result = m_graph.addNode(
-            SpecNone, op, currentCodeOrigin(), info1, info2,
+            SpecNone, op, currentNodeOrigin(), info1, info2,
             Edge(child1), Edge(child2), Edge(child3));
-        ASSERT(op != Phi);
-        m_currentBlock->append(result);
-        return result;
+        return addToGraph(result);
     }
     
     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
     {
         Node* result = m_graph.addNode(
-            SpecNone, Node::VarArg, op, currentCodeOrigin(), info1, info2,
+            SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2,
             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
-        ASSERT(op != Phi);
-        m_currentBlock->append(result);
+        addToGraph(result);
         
         m_numPassedVarArgs = 0;
         
         return result;
     }
-
+    
     void addVarArgChild(Node* child)
     {
         m_graph.m_varArgChildren.append(Edge(child));
         m_numPassedVarArgs++;
     }
     
-    Node* addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
+    Node* addCallWithoutSettingResult(
+        NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
+        SpeculatedType prediction)
     {
-        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
-
-        SpeculatedType prediction = SpecNone;
-        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
-            m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
-            prediction = getPrediction();
-        }
-        
-        addVarArgChild(get(currentInstruction[1].u.operand));
-        int argCount = currentInstruction[2].u.operand;
-        if (JSStack::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
-            m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
+        addVarArgChild(callee);
+        size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
+        if (parameterSlots > m_parameterSlots)
+            m_parameterSlots = parameterSlots;
 
-        int registerOffset = currentInstruction[3].u.operand;
-        int dummyThisArgument = op == Call ? 0 : 1;
-        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
-            addVarArgChild(get(registerOffset + argumentToOperand(i)));
+        for (int i = 0; i < argCount; ++i)
+            addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
 
-        Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
-        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
-            set(putInstruction[1].u.operand, call);
-        return call;
+        return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
     }
     
-    Node* addStructureTransitionCheck(JSCell* object, Structure* structure)
+    Node* addCall(
+        int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
+        SpeculatedType prediction)
     {
-        // Add a weak JS constant for the object regardless, since the code should
-        // be jettisoned if the object ever dies.
-        Node* objectNode = cellConstant(object);
-        
-        if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
-            addToGraph(StructureTransitionWatchpoint, OpInfo(structure), objectNode);
-            return objectNode;
-        }
-        
-        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
-        
-        return objectNode;
+        Node* call = addCallWithoutSettingResult(
+            op, opInfo, callee, argCount, registerOffset, prediction);
+        VirtualRegister resultReg(result);
+        if (resultReg.isValid())
+            set(resultReg, call);
+        return call;
     }
     
-    Node* addStructureTransitionCheck(JSCell* object)
+    Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
     {
-        return addStructureTransitionCheck(object, object->structure());
+        Node* objectNode = weakJSConstant(object);
+        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
+        return objectNode;
     }
     
     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
     {
-        return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
+        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+        return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
     }
 
     SpeculatedType getPrediction(unsigned bytecodeIndex)
@@ -823,18 +733,20 @@ private:
     
     SpeculatedType getPredictionWithoutOSRExit()
     {
-        return getPredictionWithoutOSRExit(m_currentProfilingIndex);
+        return getPredictionWithoutOSRExit(m_currentIndex);
     }
     
     SpeculatedType getPrediction()
     {
-        return getPrediction(m_currentProfilingIndex);
+        return getPrediction(m_currentIndex);
     }
     
     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
     {
-        profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
-        return ArrayMode::fromObserved(profile, action, false);
+        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+        profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
+        bool makeSafe = profile->outOfBounds(locker);
+        return ArrayMode::fromObserved(locker, profile, action, makeSafe);
     }
     
     ArrayMode getArrayMode(ArrayProfile* profile)
@@ -842,67 +754,46 @@ private:
         return getArrayMode(profile, Array::Read);
     }
     
-    ArrayMode getArrayModeAndEmitChecks(ArrayProfile* profile, Array::Action action, Node* base)
-    {
-        profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
-        
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
-        if (m_inlineStackTop->m_profiledBlock->numberOfRareCaseProfiles())
-            dataLogF("Slow case profile for bc#%u: %u\n", m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter);
-        dataLogF("Array profile for bc#%u: %p%s%s, %u\n", m_currentIndex, profile->expectedStructure(), profile->structureIsPolymorphic() ? " (polymorphic)" : "", profile->mayInterceptIndexedAccesses() ? " (may intercept)" : "", profile->observedArrayModes());
-#endif
-        
-        bool makeSafe =
-            m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
-            || profile->outOfBounds();
-        
-        ArrayMode result = ArrayMode::fromObserved(profile, action, makeSafe);
-        
-        if (profile->hasDefiniteStructure()
-            && result.benefitsFromStructureCheck()
-            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
-            addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(profile->expectedStructure())), base);
-        
-        return result;
-    }
-    
     Node* makeSafe(Node* node)
     {
-        bool likelyToTakeSlowCase;
-        if (!isX86() && node->op() == ArithMod)
-            likelyToTakeSlowCase = false;
-        else
-            likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
+        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+            node->mergeFlags(NodeMayOverflowInDFG);
+        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+            node->mergeFlags(NodeMayNegZeroInDFG);
         
-        if (!likelyToTakeSlowCase
-            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
-            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+        if (!isX86() && node->op() == ArithMod)
+            return node;
+
+        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
             return node;
         
         switch (node->op()) {
         case UInt32ToNumber:
         case ArithAdd:
         case ArithSub:
-        case ArithNegate:
         case ValueAdd:
         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
-            node->mergeFlags(NodeMayOverflow);
+            node->mergeFlags(NodeMayOverflowInBaseline);
             break;
             
+        case ArithNegate:
+            // Currently we can't tell the difference between a negation overflowing
+            // (i.e. -(1 << 31)) and generating negative zero (i.e. -0). If it took the
+            // slow path then we assume that it did both of those things.
+            node->mergeFlags(NodeMayOverflowInBaseline);
+            node->mergeFlags(NodeMayNegZeroInBaseline);
+            break;
+
         case ArithMul:
+            // FIXME: We should detect cases where we only overflowed but never created
+            // negative zero.
+            // https://bugs.webkit.org/show_bug.cgi?id=132470
             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
-                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
-#if DFG_ENABLE(DEBUG_VERBOSE)
-                dataLogF("Making ArithMul @%u take deepest slow case.\n", node->index());
-#endif
-                node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
-            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
-                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
-#if DFG_ENABLE(DEBUG_VERBOSE)
-                dataLogF("Making ArithMul @%u take faster slow case.\n", node->index());
-#endif
-                node->mergeFlags(NodeMayNegZero);
-            }
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+                node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
+            else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+                node->mergeFlags(NodeMayNegZeroInBaseline);
             break;
             
         default:
@@ -917,42 +808,34 @@ private:
     {
         ASSERT(node->op() == ArithDiv);
         
+        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+            node->mergeFlags(NodeMayOverflowInDFG);
+        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+            node->mergeFlags(NodeMayNegZeroInDFG);
+        
         // The main slow case counter for op_div in the old JIT counts only when
         // the operands are not numbers. We don't care about that since we already
         // have speculations in place that take care of that separately. We only
         // care about when the outcome of the division is not an integer, which
         // is what the special fast case counter tells us.
         
-        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)
-            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
-            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
             return node;
         
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLogF("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(node->op()), node->index(), m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
-#endif
-        
-        // FIXME: It might be possible to make this more granular. The DFG certainly can
-        // distinguish between negative zero and overflow in its exit profiles.
-        node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
+        // FIXME: It might be possible to make this more granular.
+        node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
         
         return node;
     }
     
-    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
+    void noticeArgumentsUse()
     {
-        if (direct)
-            return true;
+        // All of the arguments in this function need to be formatted as JSValues because we will
+        // load from them in a random-access fashion and we don't want to have to switch on
+        // format.
         
-        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
-            return false;
-        
-        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
-            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
-                return false;
-        }
-        
-        return true;
+        for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
+            argument->mergeShouldNeverUnbox(true);
     }
     
     void buildOperandMapsIfNecessary();
@@ -966,52 +849,24 @@ private:
     BasicBlock* m_currentBlock;
     // The bytecode index of the current instruction being generated.
     unsigned m_currentIndex;
-    // The bytecode index of the value profile of the current instruction being generated.
-    unsigned m_currentProfilingIndex;
-
-    // We use these values during code generation, and to avoid the need for
-    // special handling we make sure they are available as constants in the
-    // CodeBlock's constant pool. These variables are initialized to
-    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
-    // constant pool, as necessary.
-    unsigned m_constantUndefined;
-    unsigned m_constantNull;
-    unsigned m_constantNaN;
-    unsigned m_constant1;
-    HashMap<JSCell*, unsigned> m_cellConstants;
-    HashMap<JSCell*, Node*> m_cellConstantNodes;
-
-    // A constant in the constant pool may be represented by more than one
-    // node in the graph, depending on the context in which it is being used.
-    struct ConstantRecord {
-        ConstantRecord()
-            : asInt32(0)
-            , asNumeric(0)
-            , asJSValue(0)
-        {
-        }
-
-        Node* asInt32;
-        Node* asNumeric;
-        Node* asJSValue;
-    };
+    // The semantic origin of the current node, if different from the current bytecode index.
+    CodeOrigin m_currentSemanticOrigin;
 
-    // Track the index of the node whose result is the current value for every
-    // register value in the bytecode - argument, local, and temporary.
-    Vector<ConstantRecord, 16> m_constants;
+    FrozenValue* m_constantUndefined;
+    FrozenValue* m_constantNull;
+    FrozenValue* m_constantNaN;
+    FrozenValue* m_constantOne;
+    Vector<Node*, 16> m_constants;
 
     // The number of arguments passed to the function.
     unsigned m_numArguments;
     // The number of locals (vars + temporaries) used in the function.
     unsigned m_numLocals;
-    // The set of registers we need to preserve across BasicBlock boundaries;
-    // typically equal to the set of vars, but we expand this to cover all
-    // temporaries that persist across blocks (dues to ?:, &&, ||, etc).
-    BitVector m_preservedVars;
     // The number of slots (in units of sizeof(Register)) that we need to
-    // preallocate for calls emanating from this frame. This includes the
-    // size of the CallFrame, only if this is not a leaf function.  (I.e.
-    // this is 0 if and only if this function is a leaf.)
+    // preallocate for arguments to outgoing calls from this frame. This
+    // number includes the CallFrame slots that we initialize for the callee
+    // (but not the callee-initialized CallerFrame and ReturnPC slots).
+    // This number is 0 if and only if this function is a leaf.
     unsigned m_parameterSlots;
     // The number of var args passed to the next var arg node.
     unsigned m_numPassedVarArgs;
@@ -1034,8 +889,8 @@ private:
         // (the machine code block, which is the transitive, though not necessarily
         // direct, caller).
         Vector<unsigned> m_identifierRemap;
-        Vector<unsigned> m_constantRemap;
         Vector<unsigned> m_constantBufferRemap;
+        Vector<unsigned> m_switchRemap;
         
         // Blocks introduced by this code block, which need successor linking.
         // May include up to one basic block that includes the continuation after
@@ -1045,15 +900,14 @@ private:
         Vector<UnlinkedBlock> m_unlinkedBlocks;
         
         // Potential block linking targets. Must be sorted by bytecodeBegin, and
-        // cannot have two blocks that have the same bytecodeBegin. For this very
-        // reason, this is not equivalent to 
-        Vector<BlockIndex> m_blockLinkingTargets;
+        // cannot have two blocks that have the same bytecodeBegin.
+        Vector<BasicBlock*> m_blockLinkingTargets;
         
         // If the callsite's basic block was split into two, then this will be
         // the head of the callsite block. It needs its successors linked to the
         // m_unlinkedBlocks, but not the other way around: there's no way for
         // any blocks in m_unlinkedBlocks to jump back into this block.
-        BlockIndex m_callsiteBlockHead;
+        BasicBlock* m_callsiteBlockHead;
         
         // Does the callsite block head need linking? This is typically true
         // but will be false for the machine code block's inline stack entry
@@ -1068,6 +922,9 @@ private:
         // code block had gathered.
         LazyOperandValueProfileParser m_lazyOperands;
         
+        CallLinkInfoMap m_callLinkInfos;
+        StubInfoMap m_stubInfos;
+        
         // Did we see any returns? We need to handle the (uncommon but necessary)
         // case where a procedure that does not return was inlined.
         bool m_didReturn;
@@ -1084,49 +941,66 @@ private:
             ByteCodeParser*,
             CodeBlock*,
             CodeBlock* profiledBlock,
-            BlockIndex callsiteBlockHead,
+            BasicBlock* callsiteBlockHead,
             JSFunction* callee, // Null if this is a closure call.
             VirtualRegister returnValueVR,
             VirtualRegister inlineCallFrameStart,
             int argumentCountIncludingThis,
-            CodeSpecializationKind);
+            InlineCallFrame::Kind);
         
         ~InlineStackEntry()
         {
             m_byteCodeParser->m_inlineStackTop = m_caller;
         }
         
-        int remapOperand(int operand) const
+        VirtualRegister remapOperand(VirtualRegister operand) const
         {
             if (!m_inlineCallFrame)
                 return operand;
             
-            if (operand >= FirstConstantRegisterIndex) {
-                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
-                ASSERT(result >= FirstConstantRegisterIndex);
-                return result;
-            }
-
-            ASSERT(operand != JSStack::Callee);
+            ASSERT(!operand.isConstant());
 
-            return operand + m_inlineCallFrame->stackOffset;
+            return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
         }
     };
     
     InlineStackEntry* m_inlineStackTop;
+    
+    struct DelayedSetLocal {
+        CodeOrigin m_origin;
+        VirtualRegister m_operand;
+        Node* m_value;
+        
+        DelayedSetLocal() { }
+        DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
+            : m_origin(origin)
+            , m_operand(operand)
+            , m_value(value)
+        {
+        }
+        
+        Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
+        {
+            if (m_operand.isArgument())
+                return parser->setArgument(m_origin, m_operand, m_value, setMode);
+            return parser->setLocal(m_origin, m_operand, m_value, setMode);
+        }
+    };
+    
+    Vector<DelayedSetLocal, 2> m_setLocalQueue;
 
     // Have we built operand maps? We initialize them lazily, and only when doing
     // inlining.
     bool m_haveBuiltOperandMaps;
     // Mapping between identifier names and numbers.
-    IdentifierMap m_identifierMap;
-    // Mapping between values and constant numbers.
-    JSValueMap m_jsValueMap;
-    // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
-    // work-around for the fact that JSValueMap can't handle "empty" values.
-    unsigned m_emptyJSValueIndex;
+    BorrowedIdentifierMap m_identifierMap;
+    
+    CodeBlock* m_dfgCodeBlock;
+    CallLinkStatus::ContextMap m_callContextMap;
+    StubInfoMap m_dfgStubInfos;
     
     Instruction* m_currentInstruction;
+    bool m_hasDebuggerEnabled;
 };
 
 #define NEXT_OPCODE(name) \
@@ -1137,328 +1011,831 @@ private:
     m_currentIndex += OPCODE_LENGTH(name); \
     return shouldContinueParsing
 
-
-void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
+void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
 {
     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
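+    // Bytecode operand layout for op_call/op_construct: pc[1] is the result register, pc[2] the
+    // callee, pc[3] the argument count including |this|, and pc[4] the register offset, negated
+    // here because the overloads below expect a non-positive offset.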
+    handleCall(
+        pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
+        pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
+}
+
+void ByteCodeParser::handleCall(
+    int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
+    int callee, int argumentCountIncludingThis, int registerOffset)
+{
+    Node* callTarget = get(VirtualRegister(callee));
     
-    Node* callTarget = get(currentInstruction[1].u.operand);
+    CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
+        m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
+        m_inlineStackTop->m_callLinkInfos, m_callContextMap);
+    
+    handleCall(
+        result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
+        argumentCountIncludingThis, registerOffset, callLinkStatus);
+}
     
-    CallLinkStatus callLinkStatus;
+void ByteCodeParser::handleCall(
+    int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
+    Node* callTarget, int argumentCountIncludingThis, int registerOffset,
+    CallLinkStatus callLinkStatus)
+{
+    handleCall(
+        result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
+        registerOffset, callLinkStatus, getPrediction());
+}
 
-    if (m_graph.isConstant(callTarget))
-        callLinkStatus = CallLinkStatus(m_graph.valueOfJSConstant(callTarget)).setIsProved(true);
-    else {
-        callLinkStatus = CallLinkStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex);
-        callLinkStatus.setHasBadFunctionExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction));
-        callLinkStatus.setHasBadCacheExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
-        callLinkStatus.setHasBadExecutableExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadExecutable));
-    }
+void ByteCodeParser::handleCall(
+    int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
+    Node* callTarget, int argumentCountIncludingThis, int registerOffset,
+    CallLinkStatus callLinkStatus, SpeculatedType prediction)
+{
+    ASSERT(registerOffset <= 0);
     
-#if DFG_ENABLE(DEBUG_VERBOSE)
-    dataLog("For call at bc#", m_currentIndex, ": ", callLinkStatus, "\n");
-#endif
+    if (callTarget->isCellConstant())
+        callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
+    
+    if (Options::verboseDFGByteCodeParsing())
+        dataLog("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
     
     if (!callLinkStatus.canOptimize()) {
         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
         // that we cannot optimize them.
         
-        addCall(interpreter, currentInstruction, op);
+        addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
         return;
     }
     
-    int argumentCountIncludingThis = currentInstruction[2].u.operand;
-    int registerOffset = currentInstruction[3].u.operand;
-
-    // Do we have a result?
-    bool usesResult = false;
-    int resultOperand = 0; // make compiler happy
-    unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
-    Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
-    SpeculatedType prediction = SpecNone;
-    if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
-        resultOperand = putInstruction[1].u.operand;
-        usesResult = true;
-        m_currentProfilingIndex = nextOffset;
-        prediction = getPrediction();
-        nextOffset += OPCODE_LENGTH(op_call_put_result);
-    }
-
-    if (InternalFunction* function = callLinkStatus.internalFunction()) {
-        if (handleConstantInternalFunction(usesResult, resultOperand, function, registerOffset, argumentCountIncludingThis, prediction, kind)) {
-            // This phantoming has to be *after* the code for the intrinsic, to signify that
-            // the inputs must be kept alive whatever exits the intrinsic may do.
-            addToGraph(Phantom, callTarget);
-            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
-            return;
-        }
-        
-        // Can only handle this using the generic call handler.
-        addCall(interpreter, currentInstruction, op);
+    unsigned nextOffset = m_currentIndex + instructionSize;
+    
+    OpInfo callOpInfo;
+    
+    if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
+        if (m_graph.compilation())
+            m_graph.compilation()->noticeInlinedCall();
         return;
     }
-        
-    Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind);
-    if (intrinsic != NoIntrinsic) {
-        emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind);
-            
-        if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
-            // This phantoming has to be *after* the code for the intrinsic, to signify that
-            // the inputs must be kept alive whatever exits the intrinsic may do.
-            addToGraph(Phantom, callTarget);
-            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
-            if (m_graph.m_compilation)
-                m_graph.m_compilation->noticeInlinedCall();
-            return;
+    
+#if ENABLE(FTL_NATIVE_CALL_INLINING)
+    if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
+        CallVariant callee = callLinkStatus[0];
+        JSFunction* function = callee.function();
+        CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
+        if (function && function->isHostFunction()) {
+            emitFunctionChecks(callee, callTarget, virtualRegisterForArgument(0, registerOffset));
+            callOpInfo = OpInfo(m_graph.freeze(function));
+
+            if (op == Call)
+                op = NativeCall;
+            else {
+                ASSERT(op == Construct);
+                op = NativeConstruct;
+            }
         }
-    } else if (handleInlining(usesResult, callTarget, resultOperand, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
-        if (m_graph.m_compilation)
-            m_graph.m_compilation->noticeInlinedCall();
-        return;
     }
+#endif
     
-    addCall(interpreter, currentInstruction, op);
+    addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
 }
 
-void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
+void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
 {
-    Node* thisArgument;
-    if (kind == CodeForCall)
-        thisArgument = get(registerOffset + argumentToOperand(0));
-    else
-        thisArgument = 0;
-
-    if (callLinkStatus.isProved()) {
-        addToGraph(Phantom, callTarget, thisArgument);
+    ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
+    
+    int result = pc[1].u.operand;
+    int callee = pc[2].u.operand;
+    int thisReg = pc[3].u.operand;
+    int arguments = pc[4].u.operand;
+    int firstFreeReg = pc[5].u.operand;
+    int firstVarArgOffset = pc[6].u.operand;
+    
+    SpeculatedType prediction = getPrediction();
+    
+    Node* callTarget = get(VirtualRegister(callee));
+    
+    CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
+        m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
+        m_inlineStackTop->m_callLinkInfos, m_callContextMap);
+    if (callTarget->isCellConstant())
+        callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
+    
+    if (Options::verboseDFGByteCodeParsing())
+        dataLog("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
+    
+    if (callLinkStatus.canOptimize()
+        && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(kind), prediction)) {
+        if (m_graph.compilation())
+            m_graph.compilation()->noticeInlinedCall();
         return;
     }
     
-    ASSERT(callLinkStatus.canOptimize());
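+    // Inlining wasn't possible, so emit a generic varargs call node that spreads the arguments
+    // object at run time.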
+    CallVarargsData* data = m_graph.m_callVarargsData.add();
+    data->firstVarArgOffset = firstVarArgOffset;
     
-    if (JSFunction* function = callLinkStatus.function())
-        addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument);
-    else {
-        ASSERT(callLinkStatus.structure());
-        ASSERT(callLinkStatus.executable());
-        
-        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
-        addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
-    }
+    Node* thisChild = get(VirtualRegister(thisReg));
+    
+    Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild);
+    VirtualRegister resultReg(result);
+    if (resultReg.isValid())
+        set(resultReg, call);
 }
 
-void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
+void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
 {
-    for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
-        addToGraph(Phantom, get(registerOffset + argumentToOperand(i)));
-}
-
-bool ByteCodeParser::handleInlining(bool usesResult, Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
+    Node* thisArgument;
+    if (thisArgumentReg.isValid())
+        thisArgument = get(thisArgumentReg);
+    else
+        thisArgument = 0;
+
+    JSCell* calleeCell;
+    Node* callTargetForCheck;
+    if (callee.isClosureCall()) {
+        calleeCell = callee.executable();
+        callTargetForCheck = addToGraph(GetExecutable, callTarget);
+    } else {
+        calleeCell = callee.nonExecutableCallee();
+        callTargetForCheck = callTarget;
+    }
+    
+    ASSERT(calleeCell);
+    addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
+}
+
+void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
 {
-    // First, the really simple checks: do we have an actual JS function?
-    if (!callLinkStatus.executable())
-        return false;
-    if (callLinkStatus.executable()->isHostFunction())
-        return false;
+    for (int i = 0; i < argumentCountIncludingThis; ++i)
+        addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
+}
+
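+// Returns UINT_MAX if the callee cannot be inlined at this call site; otherwise returns the
+// callee's bytecode instruction count, which the caller charges against its remaining inlining
+// balance.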
+unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
+{
+    if (verbose)
+        dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
     
-    FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
+    if (m_hasDebuggerEnabled) {
+        if (verbose)
+            dataLog("    Failing because the debugger is in use.\n");
+        return UINT_MAX;
+    }
+
+    FunctionExecutable* executable = callee.functionExecutable();
+    if (!executable) {
+        if (verbose)
+            dataLog("    Failing because there is no function executable.\n");
+        return UINT_MAX;
+    }
     
     // Does the number of arguments we're passing match the arity of the target? We currently
     // inline only if the number of arguments passed is greater than or equal to the number
     // of arguments expected.
-    if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
-        return false;
+    if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
+        if (verbose)
+            dataLog("    Failing because of arity mismatch.\n");
+        return UINT_MAX;
+    }
+    
+    // Do we have a code block, and does the code block's size match the heuristics/requirements for
+    // being an inline candidate? We might not have a code block (1) if code was thrown away,
+    // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and
+    // specialization kind is construct. In the former 2 cases, we could still theoretically attempt
+    // to inline it if we had a static proof of what was being called; this might happen for example
+    // if you call a global function, where watchpointing gives us static information. Overall,
+    // it's a rare case because we expect that any hot callees would have already been compiled.
+    CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
+    if (!codeBlock) {
+        if (verbose)
+            dataLog("    Failing because no code block available.\n");
+        return UINT_MAX;
+    }
+    CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
+        codeBlock, kind, callee.isClosureCall());
+    if (verbose) {
+        dataLog("    Kind: ", kind, "\n");
+        dataLog("    Is closure call: ", callee.isClosureCall(), "\n");
+        dataLog("    Capability level: ", capabilityLevel, "\n");
+        dataLog("    Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
+        dataLog("    Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
+        dataLog("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
+        dataLog("    Needs activation: ", codeBlock->ownerExecutable()->needsActivation(), "\n");
+        dataLog("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
+    }
+    if (!canInline(capabilityLevel)) {
+        if (verbose)
+            dataLog("    Failing because the function is not inlineable.\n");
+        return UINT_MAX;
+    }
+    
+    // Check if the caller is already too large. We do this check here because that's just
+    // where we happen to also have the callee's code block, and we want that for the
+    // purpose of unsetting SABI.
+    if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
+        codeBlock->m_shouldAlwaysBeInlined = false;
+        if (verbose)
+            dataLog("    Failing because the caller is too large.\n");
+        return UINT_MAX;
+    }
+    
+    // FIXME: this should be better at predicting how much bloat we will introduce by inlining
+    // this function.
+    // https://bugs.webkit.org/show_bug.cgi?id=127627
+    
+    // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
+    // functions have very low fidelity profiling, and presumably they weren't very hot if they
+    // haven't gotten to Baseline yet. Consider not inlining these functions.
+    // https://bugs.webkit.org/show_bug.cgi?id=145503
+    
+    // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
+    // too many levels? If either of these are detected, then don't inline. We adjust our
+    // heuristics if we are dealing with a function that cannot otherwise be compiled.
     
-    // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
-    // If either of these are detected, then don't inline.
     unsigned depth = 0;
+    unsigned recursion = 0;
+    
     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
         ++depth;
-        if (depth >= Options::maximumInliningDepth())
-            return false; // Depth exceeded.
+        if (depth >= Options::maximumInliningDepth()) {
+            if (verbose)
+                dataLog("    Failing because depth exceeded.\n");
+            return UINT_MAX;
+        }
         
-        if (entry->executable() == executable)
-            return false; // Recursion detected.
+        if (entry->executable() == executable) {
+            ++recursion;
+            if (recursion >= Options::maximumInliningRecursion()) {
+                if (verbose)
+                    dataLog("    Failing because recursion detected.\n");
+                return UINT_MAX;
+            }
+        }
     }
     
-    // Do we have a code block, and does the code block's size match the heuristics/requirements for
-    // being an inline candidate? We might not have a code block if code was thrown away or if we
-    // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
-    // if we had a static proof of what was being called; this might happen for example if you call a
-    // global function, where watchpointing gives us static information. Overall, it's a rare case
-    // because we expect that any hot callees would have already been compiled.
-    CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
-    if (!codeBlock)
-        return false;
-    if (!canInlineFunctionFor(codeBlock, kind, callLinkStatus.isClosureCall()))
-        return false;
+    if (verbose)
+        dataLog("    Inlining should be possible.\n");
     
-#if DFG_ENABLE(DEBUG_VERBOSE)
-    dataLogF("Inlining executable %p.\n", executable);
-#endif
+    // It might be possible to inline.
+    return codeBlock->instructionCount();
+}
+
+template<typename ChecksFunctor>
+void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
+{
+    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
     
-    // Now we know without a doubt that we are committed to inlining. So begin the process
-    // by checking the callee (if necessary) and making sure that arguments and the callee
-    // are flushed.
-    emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind);
+    ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
     
+    CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
+    insertChecks(codeBlock);
+
     // FIXME: Don't flush constants!
     
-    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - JSStack::CallFrameHeaderSize;
+    int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
     
-    // Make sure that the area used by the call frame is reserved.
-    for (int arg = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
-        m_preservedVars.set(arg);
-    
-    // Make sure that we have enough locals.
-    unsigned newNumLocals = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
-    if (newNumLocals > m_numLocals) {
-        m_numLocals = newNumLocals;
-        for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
-            m_graph.m_blocks[i]->ensureLocals(newNumLocals);
-    }
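+    // Make sure every block has enough locals to cover the inlined frame's header and the
+    // callee's registers.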
+    ensureLocals(
+        VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
+        JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
     
     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
 
+    VirtualRegister resultReg(resultOperand);
+    if (resultReg.isValid())
+        resultReg = m_inlineStackTop->remapOperand(resultReg);
+    
     InlineStackEntry inlineStackEntry(
-        this, codeBlock, codeBlock, m_graph.m_blocks.size() - 1,
-        callLinkStatus.function(), (VirtualRegister)m_inlineStackTop->remapOperand(
-            usesResult ? resultOperand : InvalidVirtualRegister),
+        this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
     
     // This is where the actual inlining really happens.
     unsigned oldIndex = m_currentIndex;
-    unsigned oldProfilingIndex = m_currentProfilingIndex;
     m_currentIndex = 0;
-    m_currentProfilingIndex = 0;
 
-    addToGraph(InlineStart, OpInfo(argumentPositionStart));
-    if (callLinkStatus.isClosureCall()) {
-        addToGraph(SetCallee, callTargetNode);
-        addToGraph(SetMyScope, addToGraph(GetScope, callTargetNode));
+    InlineVariableData inlineVariableData;
+    inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
+    inlineVariableData.argumentPositionStart = argumentPositionStart;
+    inlineVariableData.calleeVariable = 0;
+    
+    RELEASE_ASSERT(
+        m_inlineStackTop->m_inlineCallFrame->isClosureCall
+        == callee.isClosureCall());
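+    // For closure calls the callee isn't a compile-time constant, so store it into the inlined
+    // frame's Callee slot and make sure it is never unboxed.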
+    if (callee.isClosureCall()) {
+        VariableAccessData* calleeVariable =
+            set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
+        
+        calleeVariable->mergeShouldNeverUnbox(true);
+        
+        inlineVariableData.calleeVariable = calleeVariable;
     }
     
+    m_graph.m_inlineVariableData.append(inlineVariableData);
+    
     parseCodeBlock();
+    clearCaches(); // Reset our state now that we're back to the outer code.
     
     m_currentIndex = oldIndex;
-    m_currentProfilingIndex = oldProfilingIndex;
     
     // If the inlined code created some new basic blocks, then we have linking to do.
-    if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
+    if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
         
         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
-            linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
+            linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
         else
-            ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
-        
-        // It's possible that the callsite block head is not owned by the caller.
-        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
-            // It's definitely owned by the caller, because the caller created new blocks.
-            // Assert that this all adds up.
-            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
-            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
-            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
-        } else {
-            // It's definitely not owned by the caller. Tell the caller that he does not
-            // need to link his callsite block head, because we did it for him.
-            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
-            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
-            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
-        }
+            ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
+        
+        if (callerLinkability == CallerDoesNormalLinking)
+            cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
         
         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
     } else
         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
     
-    BasicBlock* lastBlock = m_graph.m_blocks.last().get();
+    BasicBlock* lastBlock = m_graph.lastBlock();
     // If there was a return, but no early returns, then we're done. We allow parsing of
     // the caller to continue in whatever basic block we're in right now.
     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
-        ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
+        if (Options::verboseDFGByteCodeParsing())
+            dataLog("    Allowing parsing to continue in last inlined block.\n");
+        
+        ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());
         
         // If we created new blocks then the last block needs linking, but in the
         // caller. It doesn't need to be linked to, but it needs outgoing links.
         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
-#if DFG_ENABLE(DEBUG_VERBOSE)
-            dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
-#endif
             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
             // for release builds because this block will never serve as a potential target
             // in the linker's binary search.
+            if (Options::verboseDFGByteCodeParsing())
+                dataLog("        Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
             lastBlock->bytecodeBegin = m_currentIndex;
-            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
+            if (callerLinkability == CallerDoesNormalLinking) {
+                if (verbose)
+                    dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
+                m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
+            }
         }
         
-        m_currentBlock = m_graph.m_blocks.last().get();
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLogF("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
-#endif
-        return true;
+        m_currentBlock = m_graph.lastBlock();
+        return;
     }
     
+    if (Options::verboseDFGByteCodeParsing())
+        dataLog("    Creating new block after inlining.\n");
+
     // If we get to this point then all blocks must end in some sort of terminals.
-    ASSERT(lastBlock->last()->isTerminal());
-    
+    ASSERT(lastBlock->terminal());
+
+    // Need to create a new basic block for the continuation at the caller.
+    RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
+
     // Link the early returns to the basic block we're about to create.
     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
             continue;
-        BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
-        ASSERT(!block->isLinked);
-        Node* node = block->last();
+        BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
+        ASSERT(!blockToLink->isLinked);
+        Node* node = blockToLink->terminal();
         ASSERT(node->op() == Jump);
-        ASSERT(node->takenBlockIndex() == NoBlock);
-        node->setTakenBlockIndex(m_graph.m_blocks.size());
+        ASSERT(!node->targetBlock());
+        node->targetBlock() = block.get();
         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
-#if !ASSERT_DISABLED
-        block->isLinked = true;
-#endif
+        if (verbose)
+            dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
+        blockToLink->didLink();
     }
     
-    // Need to create a new basic block for the continuation at the caller.
-    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
-#if DFG_ENABLE(DEBUG_VERBOSE)
-    dataLogF("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
-#endif
     m_currentBlock = block.get();
-    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
-    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
-    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
-    m_graph.m_blocks.append(block.release());
+    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
+    if (verbose)
+        dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
+    if (callerLinkability == CallerDoesNormalLinking) {
+        m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
+        m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
+    }
+    m_graph.appendBlock(block);
     prepareToParseBlock();
+}
+
+void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
+{
+    // It's possible that the callsite block head is not owned by the caller.
+    if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
+        // It's definitely owned by the caller, because the caller created new blocks.
+        // Assert that this all adds up.
+        ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
+        ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
+        inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
+    } else {
+        // It's definitely not owned by the caller. Tell the caller that he does not
+        // need to link his callsite block head, because we did it for him.
+        ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
+        ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
+        inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
+    }
+}
+
+template<typename ChecksFunctor>
+bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
+{
+    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
     
-    // At this point we return and continue to generate code for the caller, but
-    // in the new basic block.
-#if DFG_ENABLE(DEBUG_VERBOSE)
-    dataLogF("Done inlining executable %p, continuing code generation in new block.\n", executable);
-#endif
+    if (!inliningBalance)
+        return false;
+    
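+    // Track whether the intrinsic and internal-function handlers actually emitted their guard
+    // checks; the RELEASE_ASSERTs below insist that a successful handler inserted them and a
+    // failed one did not.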
+    bool didInsertChecks = false;
+    auto insertChecksWithAccounting = [&] () {
+        insertChecks(nullptr);
+        didInsertChecks = true;
+    };
+    
+    if (verbose)
+        dataLog("    Considering callee ", callee, "\n");
+    
+    // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
+    // we currently don't have any way of getting profiling information for arguments to non-JS varargs
+    // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
+    // and there are no callsite value profiles, and native functions won't have callee value profiles for
+    // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
+    // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
+    // calling LoadVarargs twice.
+    if (!InlineCallFrame::isVarargs(kind)) {
+        if (InternalFunction* function = callee.internalFunction()) {
+            if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) {
+                RELEASE_ASSERT(didInsertChecks);
+                addToGraph(Phantom, callTargetNode);
+                emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
+                inliningBalance--;
+                return true;
+            }
+            RELEASE_ASSERT(!didInsertChecks);
+            return false;
+        }
+    
+        Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
+        if (intrinsic != NoIntrinsic) {
+            if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
+                RELEASE_ASSERT(didInsertChecks);
+                addToGraph(Phantom, callTargetNode);
+                emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
+                inliningBalance--;
+                return true;
+            }
+            RELEASE_ASSERT(!didInsertChecks);
+            return false;
+        }
+    }
+    
+    unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
+    if (myInliningCost > inliningBalance)
+        return false;
+
+    Instruction* savedCurrentInstruction = m_currentInstruction;
+    inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
+    inliningBalance -= myInliningCost;
+    m_currentInstruction = savedCurrentInstruction;
     return true;
 }
 
-void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, Node* node)
+bool ByteCodeParser::handleInlining(
+    Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
+    int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
+    VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
+    unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
 {
-    if (!usesResult)
-        return;
-    set(resultOperand, node);
+    if (verbose) {
+        dataLog("Handling inlining...\n");
+        dataLog("Stack: ", currentCodeOrigin(), "\n");
+    }
+    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
+    
+    if (!callLinkStatus.size()) {
+        if (verbose)
+            dataLog("Bailing inlining.\n");
+        return false;
+    }
+    
+    if (InlineCallFrame::isVarargs(kind)
+        && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
+        if (verbose)
+            dataLog("Bailing inlining because of varargs.\n");
+        return false;
+    }
+        
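+    // Start from the generic per-call-site inlining budget and clamp it for construct and
+    // closure-call sites, which get tighter limits.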
+    unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
+    if (specializationKind == CodeForConstruct)
+        inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
+    if (callLinkStatus.isClosureCall())
+        inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
+    
+    // First check if we can avoid creating control flow. Our inliner does some CFG
+    // simplification on the fly and this helps reduce compile times, but we can only leverage
+    // this in cases where we don't need control flow diamonds to check the callee.
+    if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
+        int registerOffset;
+        
+        // Only used for varargs calls.
+        unsigned mandatoryMinimum = 0;
+        unsigned maxNumArguments = 0;
+
+        if (InlineCallFrame::isVarargs(kind)) {
+            if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
+                mandatoryMinimum = functionExecutable->parameterCount();
+            else
+                mandatoryMinimum = 0;
+            
+            // includes "this"
+            maxNumArguments = std::max(
+                callLinkStatus.maxNumArguments(),
+                mandatoryMinimum + 1);
+            
+            // We sort of pretend that this *is* the number of arguments that were passed.
+            argumentCountIncludingThis = maxNumArguments;
+            
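+            // Synthesize a register offset as if maxNumArguments had actually been passed: place
+            // the frame beyond the first free register and round the (negative) offset so that it
+            // stays stack-aligned.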
+            registerOffset = registerOffsetOrFirstFreeReg + 1;
+            registerOffset -= maxNumArguments; // includes "this"
+            registerOffset -= JSStack::CallFrameHeaderSize;
+            registerOffset = -WTF::roundUpToMultipleOf(
+                stackAlignmentRegisters(),
+                -registerOffset);
+        } else
+            registerOffset = registerOffsetOrFirstFreeReg;
+        
+        bool result = attemptToInlineCall(
+            callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
+            argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
+            inliningBalance, [&] (CodeBlock* codeBlock) {
+                emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
+
+                // If we have a varargs call, we want to extract the arguments right now.
+                if (InlineCallFrame::isVarargs(kind)) {
+                    int remappedRegisterOffset =
+                        m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
+                    
+                    ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
+                    
+                    int argumentStart = registerOffset + JSStack::CallFrameHeaderSize;
+                    int remappedArgumentStart =
+                        m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
+
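+                    // LoadVarargs writes the spread arguments into the slots after |this| (hence
+                    // the +1 on |start|) and stores the count into the frame's ArgumentCount
+                    // register; |this| itself is copied in separately below.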
+                    LoadVarargsData* data = m_graph.m_loadVarargsData.add();
+                    data->start = VirtualRegister(remappedArgumentStart + 1);
+                    data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount);
+                    data->offset = argumentsOffset;
+                    data->limit = maxNumArguments;
+                    data->mandatoryMinimum = mandatoryMinimum;
+            
+                    addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
+
+                    // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
+                    // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
+                    // callTargetNode because the other 2 are still in use and alive at this point.
+                    addToGraph(Phantom, callTargetNode);
+
+                    // In DFG IR before SSA, we cannot insert control flow between the
+                    // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
+                    // SSA. Fortunately, we also have other reasons for not inserting control flow
+                    // before SSA.
+            
+                    VariableAccessData* countVariable = newVariableAccessData(
+                        VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount));
+                    // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
+                    // matter very much, since our use of a SetArgument and Flushes for this local slot is
+                    // mostly just a formality.
+                    countVariable->predict(SpecInt32);
+                    countVariable->mergeIsProfitableToUnbox(true);
+                    Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
+                    m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
+
+                    set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
+                    for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
+                        VariableAccessData* variable = newVariableAccessData(
+                            VirtualRegister(remappedArgumentStart + argument));
+                        variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
+                        
+                        // For a while it had been my intention to do things like this inside the
+                        // prediction injection phase. But in this case it's really best to do it here,
+                        // because it's here that we have access to the variable access datas for the
+                        // inlining we're about to do.
+                        //
+                        // Something else that's interesting here is that we'd really love to get
+                        // predictions from the arguments loaded at the callsite, rather than the
+                        // arguments received inside the callee. But that probably won't matter for most
+                        // calls.
+                        if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
+                            ConcurrentJITLocker locker(codeBlock->m_lock);
+                            if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
+                                variable->predict(profile->computeUpdatedPrediction(locker));
+                        }
+                        
+                        Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
+                        m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
+                    }
+                }
+            });
+        if (verbose) {
+            dataLog("Done inlining (simple).\n");
+            dataLog("Stack: ", currentCodeOrigin(), "\n");
+            dataLog("Result: ", result, "\n");
+        }
+        return result;
+    }
+    
+    // We need to create some kind of switch over callee. For now we only do this if we believe that
+    // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
+    // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
+    // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
+    // we could improve that aspect by doing polymorphic inlining while still gathering the
+    // profiling.
+    if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()
+        || InlineCallFrame::isVarargs(kind)) {
+        if (verbose) {
+            dataLog("Bailing inlining (hard).\n");
+            dataLog("Stack: ", currentCodeOrigin(), "\n");
+        }
+        return false;
+    }
+    
+    unsigned oldOffset = m_currentIndex;
+    
+    bool allAreClosureCalls = true;
+    bool allAreDirectCalls = true;
+    for (unsigned i = callLinkStatus.size(); i--;) {
+        if (callLinkStatus[i].isClosureCall())
+            allAreDirectCalls = false;
+        else
+            allAreClosureCalls = false;
+    }
+    
+    Node* thingToSwitchOn;
+    if (allAreDirectCalls)
+        thingToSwitchOn = callTargetNode;
+    else if (allAreClosureCalls)
+        thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
+    else {
+        // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
+        // where it would be beneficial. It might be best to handle these cases as if all calls were
+        // closure calls.
+        // https://bugs.webkit.org/show_bug.cgi?id=136020
+        if (verbose) {
+            dataLog("Bailing inlining (mix).\n");
+            dataLog("Stack: ", currentCodeOrigin(), "\n");
+        }
+        return false;
+    }
+    
+    if (verbose) {
+        dataLog("Doing hard inlining...\n");
+        dataLog("Stack: ", currentCodeOrigin(), "\n");
+    }
+    
+    int registerOffset = registerOffsetOrFirstFreeReg;
+    
+    // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
+    // store the callee so that it will be accessible to all of the blocks we're about to create. We
+    // get away with doing an immediate-set here because we wouldn't have performed any side effects
+    // yet.
+    if (verbose)
+        dataLog("Register offset: ", registerOffset);
+    VirtualRegister calleeReg(registerOffset + JSStack::Callee);
+    calleeReg = m_inlineStackTop->remapOperand(calleeReg);
+    if (verbose)
+        dataLog("Callee is going to be ", calleeReg, "\n");
+    setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
+    
+    SwitchData& data = *m_graph.m_switchData.add();
+    data.kind = SwitchCell;
+    addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
+    
+    BasicBlock* originBlock = m_currentBlock;
+    if (verbose)
+        dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
+    originBlock->didLink();
+    cancelLinkingForBlock(m_inlineStackTop, originBlock);
+    
+    // Each inlined callee will have a landing block that it returns at. They should all have jumps
+    // to the continuation block, which we create last.
+    Vector<BasicBlock*> landingBlocks;
+    
+    // We may force this true if we give up on inlining any of the edges.
+    bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
+    
+    if (verbose)
+        dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
+    
+    for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
+        m_currentIndex = oldOffset;
+        RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
+        m_currentBlock = block.get();
+        m_graph.appendBlock(block);
+        prepareToParseBlock();
+        
+        Node* myCallTargetNode = getDirect(calleeReg);
+        
+        bool inliningResult = attemptToInlineCall(
+            myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
+            argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
+            inliningBalance, [&] (CodeBlock*) { });
+        
+        if (!inliningResult) {
+            // That failed so we let the block die. Nothing interesting should have been added to
+            // the block. We also give up on inlining any of the (less frequent) callees.
+            ASSERT(m_currentBlock == block.get());
+            ASSERT(m_graph.m_blocks.last() == block);
+            m_graph.killBlockAndItsContents(block.get());
+            m_graph.m_blocks.removeLast();
+            
+            // The fact that inlining failed means we need a slow path.
+            couldTakeSlowPath = true;
+            break;
+        }
+        
+        JSCell* thingToCaseOn;
+        if (allAreDirectCalls)
+            thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
+        else {
+            ASSERT(allAreClosureCalls);
+            thingToCaseOn = callLinkStatus[i].executable();
+        }
+        data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
+        m_currentIndex = nextOffset;
+        processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
+        addToGraph(Jump);
+        if (verbose)
+            dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
+        m_currentBlock->didLink();
+        landingBlocks.append(m_currentBlock);
+
+        if (verbose)
+            dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
+    }
+    
+    RefPtr<BasicBlock> slowPathBlock = adoptRef(
+        new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
+    m_currentIndex = oldOffset;
+    data.fallThrough = BranchTarget(slowPathBlock.get());
+    m_graph.appendBlock(slowPathBlock);
+    if (verbose)
+        dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
+    slowPathBlock->didLink();
+    prepareToParseBlock();
+    m_currentBlock = slowPathBlock.get();
+    Node* myCallTargetNode = getDirect(calleeReg);
+    if (couldTakeSlowPath) {
+        addCall(
+            resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
+            registerOffset, prediction);
+    } else {
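+        // Profiling says this fallthrough case is unreachable, so just pin the block: CheckBadCell
+        // always exits, and BottomValue gives the result register a placeholder value.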
+        addToGraph(CheckBadCell);
+        addToGraph(Phantom, myCallTargetNode);
+        emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
+        
+        set(VirtualRegister(resultOperand), addToGraph(BottomValue));
+    }
+
+    m_currentIndex = nextOffset;
+    processSetLocalQueue();
+    addToGraph(Jump);
+    landingBlocks.append(m_currentBlock);
+    
+    RefPtr<BasicBlock> continuationBlock = adoptRef(
+        new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
+    m_graph.appendBlock(continuationBlock);
+    if (verbose)
+        dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
+    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
+    prepareToParseBlock();
+    m_currentBlock = continuationBlock.get();
+    
+    for (unsigned i = landingBlocks.size(); i--;)
+        landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get();
+    
+    m_currentIndex = oldOffset;
+    
+    if (verbose) {
+        dataLog("Done inlining (hard).\n");
+        dataLog("Stack: ", currentCodeOrigin(), "\n");
+    }
+    return true;
 }
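
A minimal standalone sketch (not JSC code; every name in it is illustrative) of the control-flow shape the parser builds above for a polymorphic call site: one case per profiled callee whose inlined body was parsed, a slow-path fallthrough for everything else (a generic call, or a trap if every profiled callee was inlined), and a single continuation block where all paths merge.

    #include <cstdio>

    // One "case" per inlined callee, a generic slow path as the default,
    // and a shared continuation (the code after the call site).
    int polymorphicCallSiteSketch(int calleeId, int argument)
    {
        int result;
        switch (calleeId) {
        case 0:  result = argument + 1; break;  // inlined body of callee #0
        case 1:  result = argument * 2; break;  // inlined body of callee #1
        default: result = -argument;    break;  // slow path: perform a real call
        }
        return result;                          // continuation block
    }

    int main()
    {
        std::printf("%d\n", polymorphicCallSiteSketch(1, 21)); // prints 42
    }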
 
-bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
+template<typename ChecksFunctor>
+bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
 {
     if (argumentCountIncludingThis == 1) { // Math.min()
-        setIntrinsicResult(usesResult, resultOperand, constantNaN());
+        insertChecks();
+        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
         return true;
     }
      
     if (argumentCountIncludingThis == 2) { // Math.min(x)
-        Node* result = get(registerOffset + argumentToOperand(1));
+        insertChecks();
+        Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
         addToGraph(Phantom, Edge(result, NumberUse));
-        setIntrinsicResult(usesResult, resultOperand, result);
+        set(VirtualRegister(resultOperand), result);
         return true;
     }
     
     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
-        setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
+        insertChecks();
+        set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
         return true;
     }
     
@@ -1466,43 +1843,82 @@ bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType o
     return false;
 }
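
As a quick reference for the three arities handleMinMax inlines above, here is a minimal standalone sketch (not JSC code; the function and its arguments are made up for illustration) of the value each case produces, using Math.min as the example: no user arguments plants the parser's NaN constant, one argument is number-checked and passed through, and two arguments become an ArithMin/ArithMax node.

    #include <algorithm>
    #include <cstdio>
    #include <limits>

    // Mirrors the three inlinable arities handled above, for the Math.min case.
    double minMaxAritySketch(const double* args, int argumentCountIncludingThis)
    {
        if (argumentCountIncludingThis == 1)   // Math.min(): the parser plants its NaN constant
            return std::numeric_limits<double>::quiet_NaN();
        if (argumentCountIncludingThis == 2)   // Math.min(x): the argument is checked and passed through
            return args[1];
        return std::min(args[1], args[2]);     // Math.min(x, y): becomes an ArithMin node
    }

    int main()
    {
        double args[] = { 0.0, 3.0, 2.0 };     // args[0] stands in for 'this'
        std::printf("%g\n", minMaxAritySketch(args, 3)); // prints 2
    }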
 
-// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
-// they need to perform the ToNumber conversion, which can have side-effects.
-bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
+template<typename ChecksFunctor>
+bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
 {
     switch (intrinsic) {
     case AbsIntrinsic: {
         if (argumentCountIncludingThis == 1) { // Math.abs()
-            setIntrinsicResult(usesResult, resultOperand, constantNaN());
+            insertChecks();
+            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
             return true;
         }
 
         if (!MacroAssembler::supportsFloatingPointAbs())
             return false;
 
-        Node* node = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
+        insertChecks();
+        Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
-            node->mergeFlags(NodeMayOverflow);
-        setIntrinsicResult(usesResult, resultOperand, node);
+            node->mergeFlags(NodeMayOverflowInDFG);
+        set(VirtualRegister(resultOperand), node);
         return true;
     }
 
     case MinIntrinsic:
-        return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
+        return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
         
     case MaxIntrinsic:
-        return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
-        
-    case SqrtIntrinsic: {
-        if (argumentCountIncludingThis == 1) { // Math.sqrt()
-            setIntrinsicResult(usesResult, resultOperand, constantNaN());
+        return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
+
+    case SqrtIntrinsic:
+    case CosIntrinsic:
+    case SinIntrinsic:
+    case LogIntrinsic: {
+        if (argumentCountIncludingThis == 1) {
+            insertChecks();
+            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
             return true;
         }
         
-        if (!MacroAssembler::supportsFloatingPointSqrt())
+        switch (intrinsic) {
+        case SqrtIntrinsic:
+            insertChecks();
+            set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+            
+        case CosIntrinsic:
+            insertChecks();
+            set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+            
+        case SinIntrinsic:
+            insertChecks();
+            set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+
+        case LogIntrinsic:
+            insertChecks();
+            set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+            
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
             return false;
-        
-        setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
+        }
+    }
+
+    case PowIntrinsic: {
+        if (argumentCountIncludingThis < 3) {
+            // Math.pow() and Math.pow(x) return NaN.
+            insertChecks();
+            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+            return true;
+        }
+        insertChecks();
+        VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
+        VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
+        set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
         return true;
     }
         
@@ -1510,7 +1926,7 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         if (argumentCountIncludingThis != 2)
             return false;
         
-        ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
+        ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
         if (!arrayMode.isJSArray())
             return false;
         switch (arrayMode.type()) {
@@ -1519,9 +1935,9 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         case Array::Double:
         case Array::Contiguous:
         case Array::ArrayStorage: {
-            Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
-            if (usesResult)
-                set(resultOperand, arrayPush);
+            insertChecks();
+            Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+            set(VirtualRegister(resultOperand), arrayPush);
             
             return true;
         }
@@ -1535,7 +1951,7 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         if (argumentCountIncludingThis != 1)
             return false;
         
-        ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
+        ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
         if (!arrayMode.isJSArray())
             return false;
         switch (arrayMode.type()) {
@@ -1543,9 +1959,9 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         case Array::Double:
         case Array::Contiguous:
         case Array::ArrayStorage: {
-            Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
-            if (usesResult)
-                set(resultOperand, arrayPop);
+            insertChecks();
+            Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
+            set(VirtualRegister(resultOperand), arrayPop);
             return true;
         }
             
@@ -1558,12 +1974,12 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         if (argumentCountIncludingThis != 2)
             return false;
 
-        int thisOperand = registerOffset + argumentToOperand(0);
-        int indexOperand = registerOffset + argumentToOperand(1);
-        Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));
+        insertChecks();
+        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
+        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+        Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
 
-        if (usesResult)
-            set(resultOperand, charCode);
+        set(VirtualRegister(resultOperand), charCode);
         return true;
     }
 
@@ -1571,23 +1987,33 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         if (argumentCountIncludingThis != 2)
             return false;
 
-        int thisOperand = registerOffset + argumentToOperand(0);
-        int indexOperand = registerOffset + argumentToOperand(1);
-        Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));
+        insertChecks();
+        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
+        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+        Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
 
-        if (usesResult)
-            set(resultOperand, charCode);
+        set(VirtualRegister(resultOperand), charCode);
+        return true;
+    }
+    case Clz32Intrinsic: {
+        insertChecks();
+        if (argumentCountIncludingThis == 1)
+            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
+        else {
+            Node* operand = get(virtualRegisterForArgument(1, registerOffset));
+            set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
+        }
         return true;
     }
     case FromCharCodeIntrinsic: {
         if (argumentCountIncludingThis != 2)
             return false;
 
-        int indexOperand = registerOffset + argumentToOperand(1);
-        Node* charCode = addToGraph(StringFromCharCode, getToInt32(indexOperand));
+        insertChecks();
+        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+        Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
 
-        if (usesResult)
-            set(resultOperand, charCode);
+        set(VirtualRegister(resultOperand), charCode);
 
         return true;
     }
@@ -1596,9 +2022,9 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         if (argumentCountIncludingThis != 2)
             return false;
         
-        Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
-        if (usesResult)
-            set(resultOperand, regExpExec);
+        insertChecks();
+        Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+        set(VirtualRegister(resultOperand), regExpExec);
         
         return true;
     }
@@ -1607,21 +2033,98 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         if (argumentCountIncludingThis != 2)
             return false;
         
-        Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
-        if (usesResult)
-            set(resultOperand, regExpExec);
+        insertChecks();
+        Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+        set(VirtualRegister(resultOperand), regExpExec);
         
         return true;
     }
-
+    case RoundIntrinsic: {
+        if (argumentCountIncludingThis == 1) {
+            insertChecks();
+            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+            return true;
+        }
+        if (argumentCountIncludingThis == 2) {
+            insertChecks();
+            Node* operand = get(virtualRegisterForArgument(1, registerOffset));
+            Node* roundNode = addToGraph(ArithRound, OpInfo(0), OpInfo(prediction), operand);
+            set(VirtualRegister(resultOperand), roundNode);
+            return true;
+        }
+        return false;
+    }
     case IMulIntrinsic: {
         if (argumentCountIncludingThis != 3)
             return false;
-        int leftOperand = registerOffset + argumentToOperand(1);
-        int rightOperand = registerOffset + argumentToOperand(2);
-        Node* left = getToInt32(leftOperand);
-        Node* right = getToInt32(rightOperand);
-        setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithIMul, left, right));
+        insertChecks();
+        VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
+        VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
+        Node* left = get(leftOperand);
+        Node* right = get(rightOperand);
+        set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
+        return true;
+    }
+        
+    case FRoundIntrinsic: {
+        if (argumentCountIncludingThis != 2)
+            return false;
+        insertChecks();
+        VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
+        set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
+        return true;
+    }
+        
+    case DFGTrueIntrinsic: {
+        insertChecks();
+        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
+        return true;
+    }
+        
+    case OSRExitIntrinsic: {
+        insertChecks();
+        addToGraph(ForceOSRExit);
+        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+        return true;
+    }
+        
+    case IsFinalTierIntrinsic: {
+        insertChecks();
+        set(VirtualRegister(resultOperand),
+            jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
+        return true;
+    }
+        
+    case SetInt32HeapPredictionIntrinsic: {
+        insertChecks();
+        for (int i = 1; i < argumentCountIncludingThis; ++i) {
+            Node* node = get(virtualRegisterForArgument(i, registerOffset));
+            if (node->hasHeapPrediction())
+                node->setHeapPrediction(SpecInt32);
+        }
+        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+        return true;
+    }
+        
+    case CheckInt32Intrinsic: {
+        insertChecks();
+        for (int i = 1; i < argumentCountIncludingThis; ++i) {
+            Node* node = get(virtualRegisterForArgument(i, registerOffset));
+            addToGraph(Phantom, Edge(node, Int32Use));
+        }
+        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
+        return true;
+    }
+        
+    case FiatInt52Intrinsic: {
+        if (argumentCountIncludingThis != 2)
+            return false;
+        insertChecks();
+        VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
+        if (enableInt52())
+            set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
+        else
+            set(VirtualRegister(resultOperand), get(operand));
         return true;
     }
         
@@ -1630,10 +2133,68 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
     }
 }
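
Every intrinsic handler above receives an insertChecks functor and is expected to call it once, before planting any nodes of its own, or to return false without emitting anything at all. The standalone sketch below (not JSC code; the names are invented for illustration) shows the shape of that contract.

    #include <cstdio>

    // The caller packages its guard emission in a lambda; the handler either declines
    // (returning false before emitting anything) or runs the lambda once and then
    // emits its own work.
    template<typename ChecksFunctor>
    bool handleSomeIntrinsicSketch(bool canInline, const ChecksFunctor& insertChecks)
    {
        if (!canInline)
            return false;                  // bail out: nothing was emitted
        insertChecks();                    // guards go in first, exactly once
        std::puts("emit intrinsic nodes");
        return true;
    }

    int main()
    {
        handleSomeIntrinsicSketch(true, [] { std::puts("emit checks"); });
    }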
 
+template<typename ChecksFunctor>
+bool ByteCodeParser::handleTypedArrayConstructor(
+    int resultOperand, InternalFunction* function, int registerOffset,
+    int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
+{
+    if (!isTypedView(type))
+        return false;
+    
+    if (function->classInfo() != constructorClassInfoForType(type))
+        return false;
+    
+    if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
+        return false;
+    
+    // We only have an intrinsic for the case where you say:
+    //
+    // new FooArray(blah);
+    //
+    // Of course, 'blah' could be any of the following:
+    //
+    // - Integer, indicating that you want to allocate an array of that length.
+    //   This is the thing we're hoping for, and what we can actually do meaningful
+    //   optimizations for.
+    //
+    // - Array buffer, indicating that you want to create a view onto that _entire_
+    //   buffer.
+    //
+    // - Non-buffer object, indicating that you want to create a copy of that
+    //   object by pretending that it quacks like an array.
+    //
+    // - Anything else, indicating that you want to have an exception thrown at
+    //   you.
+    //
+    // The intrinsic, NewTypedArray, will behave as if it could do any of these
+    // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
+    // predicted Int32, then we lock it in as a normal typed array allocation.
+    // Otherwise, NewTypedArray turns into a totally opaque function call that
+    // may clobber the world - by virtue of it accessing properties on what could
+    // be an object.
+    //
+    // Note that although the generic form of NewTypedArray sounds sort of awful,
+    // it is actually quite likely to be more efficient than a fully generic
+    // Construct. So, we might want to think about making NewTypedArray variadic,
+    // or else making Construct not super slow.
+    
+    if (argumentCountIncludingThis != 2)
+        return false;
+
+    insertChecks();
+    set(VirtualRegister(resultOperand),
+        addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
+    return true;
+}
+
+template<typename ChecksFunctor>
 bool ByteCodeParser::handleConstantInternalFunction(
-    bool usesResult, int resultOperand, InternalFunction* function, int registerOffset,
-    int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
+    int resultOperand, InternalFunction* function, int registerOffset,
+    int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks)
 {
+    if (verbose)
+        dataLog("    Handling constant internal function ", JSValue(function), "\n");
+    
     // If we ever find that we have a lot of internal functions that we specialize for,
     // then we should probably have some sort of hashtable dispatch, or maybe even
     // dispatch straight through the MethodTable of the InternalFunction. But for now,
@@ -1641,309 +2202,383 @@ bool ByteCodeParser::handleConstantInternalFunction(
     // we know about is small enough, that having just a linear cascade of if statements
     // is good enough.
     
-    UNUSED_PARAM(prediction); // Remove this once we do more things.
-    
-    if (function->classInfo() == &ArrayConstructor::s_info) {
+    if (function->classInfo() == ArrayConstructor::info()) {
+        if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
+            return false;
+        
+        insertChecks();
         if (argumentCountIncludingThis == 2) {
-            setIntrinsicResult(
-                usesResult, resultOperand,
-                addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(registerOffset + argumentToOperand(1))));
+            set(VirtualRegister(resultOperand),
+                addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
             return true;
         }
         
+        // FIXME: Array constructor should use "this" as newTarget.
         for (int i = 1; i < argumentCountIncludingThis; ++i)
-            addVarArgChild(get(registerOffset + argumentToOperand(i)));
-        setIntrinsicResult(
-            usesResult, resultOperand,
+            addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
+        set(VirtualRegister(resultOperand),
             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
         return true;
-    } else if (function->classInfo() == &StringConstructor::s_info) {
+    }
+    
+    if (function->classInfo() == StringConstructor::info()) {
+        insertChecks();
+        
         Node* result;
         
         if (argumentCountIncludingThis <= 1)
-            result = cellConstant(m_vm->smallStrings.emptyString());
+            result = jsConstant(m_vm->smallStrings.emptyString());
         else
-            result = addToGraph(ToString, get(registerOffset + argumentToOperand(1)));
+            result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
         
         if (kind == CodeForConstruct)
             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
         
-        setIntrinsicResult(usesResult, resultOperand, result);
+        set(VirtualRegister(resultOperand), result);
         return true;
     }
     
+    for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
+        bool result = handleTypedArrayConstructor(
+            resultOperand, function, registerOffset, argumentCountIncludingThis,
+            indexToTypedArrayType(typeIndex), insertChecks);
+        if (result)
+            return true;
+    }
+    
     return false;
 }
 
-Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
+Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
 {
+    if (base->hasConstant()) {
+        if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
+            addToGraph(Phantom, base);
+            return weakJSConstant(constant);
+        }
+    }
+    
     Node* propertyStorage;
     if (isInlineOffset(offset))
         propertyStorage = base;
     else
         propertyStorage = addToGraph(GetButterfly, base);
-    // FIXME: It would be far more efficient for load elimination (and safer from
-    // an OSR standpoint) if GetByOffset also referenced the object we were loading
-    // from, and if we could load eliminate a GetByOffset even if the butterfly
-    // had changed. That would be a great success.
-    Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage);
-
-    StorageAccessData storageAccessData;
-    storageAccessData.offset = indexRelativeToBase(offset);
-    storageAccessData.identifierNumber = identifierNumber;
-    m_graph.m_storageAccessData.append(storageAccessData);
+    
+    StorageAccessData* data = m_graph.m_storageAccessData.add();
+    data->offset = offset;
+    data->identifierNumber = identifierNumber;
+    
+    Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
 
     return getByOffset;
 }
 
-void ByteCodeParser::handleGetByOffset(
-    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
-    PropertyOffset offset)
+Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
+{
+    Node* propertyStorage;
+    if (isInlineOffset(offset))
+        propertyStorage = base;
+    else
+        propertyStorage = addToGraph(GetButterfly, base);
+    
+    StorageAccessData* data = m_graph.m_storageAccessData.add();
+    data->offset = offset;
+    data->identifierNumber = identifier;
+    
+    Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
+    
+    return result;
+}
+
+void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
 {
-    set(destinationOperand, handleGetByOffset(prediction, base, identifierNumber, offset));
+    for (unsigned i = 0; i < vector.size(); ++i)
+        cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
 }
 
 void ByteCodeParser::handleGetById(
     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
     const GetByIdStatus& getByIdStatus)
 {
-    if (!getByIdStatus.isSimple()
-        || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
-        || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache)) {
-        set(destinationOperand,
-            addToGraph(
-                getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
-                OpInfo(identifierNumber), OpInfo(prediction), base));
+    NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
+    
+    if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) {
+        set(VirtualRegister(destinationOperand),
+            addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
+        return;
+    }
+    
+    if (getByIdStatus.numVariants() > 1) {
+        if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
+            || !Options::enablePolymorphicAccessInlining()) {
+            set(VirtualRegister(destinationOperand),
+                addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
+            return;
+        }
+        
+        if (m_graph.compilation())
+            m_graph.compilation()->noticeInlinedGetById();
+    
+    // 1) Emit prototype structure checks for all chains. This may not be optimal if there
+    //    is some rarely executed case in the chain that requires a lot of checks and those
+    //    checks are not watchpointable.
+        for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
+            emitChecks(getByIdStatus[variantIndex].constantChecks());
+        
+        // 2) Emit a MultiGetByOffset
+        MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
+        data->variants = getByIdStatus.variants();
+        data->identifierNumber = identifierNumber;
+        set(VirtualRegister(destinationOperand),
+            addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
         return;
     }
     
-    ASSERT(getByIdStatus.structureSet().size());
+    ASSERT(getByIdStatus.numVariants() == 1);
+    GetByIdVariant variant = getByIdStatus[0];
                 
-    // The implementation of GetByOffset does not know to terminate speculative
-    // execution if it doesn't have a prediction, so we do it manually.
-    if (prediction == SpecNone)
-        addToGraph(ForceOSRExit);
-    else if (m_graph.m_compilation)
-        m_graph.m_compilation->noticeInlinedGetById();
+    if (m_graph.compilation())
+        m_graph.compilation()->noticeInlinedGetById();
     
-    Node* originalBaseForBaselineJIT = base;
+    Node* originalBase = base;
                 
-    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
+    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
     
-    if (!getByIdStatus.chain().isEmpty()) {
-        Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
-        JSObject* currentObject = 0;
-        for (unsigned i = 0; i < getByIdStatus.chain().size(); ++i) {
-            currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
-            currentStructure = getByIdStatus.chain()[i];
-            base = addStructureTransitionCheck(currentObject, currentStructure);
-        }
-    }
+    emitChecks(variant.constantChecks());
+
+    if (variant.alternateBase())
+        base = weakJSConstant(variant.alternateBase());
     
     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
     // ensure that the base of the original get_by_id is kept alive until we're done with
     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
-    // on something other than the base following the CheckStructure on base, or if the
-    // access was compiled to a WeakJSConstant specific value, in which case we might not
-    // have any explicit use of the base at all.
-    if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
-        addToGraph(Phantom, originalBaseForBaselineJIT);
+    // on something other than the base following the CheckStructure on base.
+    if (originalBase != base)
+        addToGraph(Phantom, originalBase);
     
-    if (getByIdStatus.specificValue()) {
-        ASSERT(getByIdStatus.specificValue().isCell());
-        
-        set(destinationOperand, cellConstant(getByIdStatus.specificValue().asCell()));
+    Node* loadedValue = handleGetByOffset(
+        variant.callLinkStatus() ? SpecCellOther : prediction,
+        base, variant.baseStructure(), identifierNumber, variant.offset(),
+        variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
+    
+    if (!variant.callLinkStatus()) {
+        set(VirtualRegister(destinationOperand), loadedValue);
         return;
     }
     
-    handleGetByOffset(
-        destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset());
-}
-
-void ByteCodeParser::prepareToParseBlock()
-{
-    for (unsigned i = 0; i < m_constants.size(); ++i)
-        m_constants[i] = ConstantRecord();
-    m_cellConstantNodes.clear();
+    Node* getter = addToGraph(GetGetter, loadedValue);
+    
+    // Make a call. We don't try to get fancy with using the smallest operand number because
+    // the stack layout phase should compress the stack anyway.
+    
+    unsigned numberOfParameters = 0;
+    numberOfParameters++; // The 'this' argument.
+    numberOfParameters++; // True return PC.
+    
+    // Start with a register offset that corresponds to the last in-use register.
+    int registerOffset = virtualRegisterForLocal(
+        m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
+    registerOffset -= numberOfParameters;
+    registerOffset -= JSStack::CallFrameHeaderSize;
+    
+    // Get the alignment right.
+    registerOffset = -WTF::roundUpToMultipleOf(
+        stackAlignmentRegisters(),
+        -registerOffset);
+    
+    ensureLocals(
+        m_inlineStackTop->remapOperand(
+            VirtualRegister(registerOffset)).toLocal());
+    
+    // Issue SetLocals. This has two effects:
+    // 1) That's how handleCall() sees the arguments.
+    // 2) If we inline then this ensures that the arguments are flushed so that if you use
+    //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
+    //    since we only really care about 'this' in this case. But we're not going to take that
+    //    shortcut.
+    int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
+    set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
+    
+    handleCall(
+        destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
+        getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
 }
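
The register-offset arithmetic used above for the inlined getter call is easier to follow with concrete numbers. The standalone sketch below (not JSC code; the frame-header and alignment constants are placeholders, not the real values) reproduces the computation: reserve slots below the last in-use local for the parameters and the call frame header, then align by rounding the offset's magnitude up to the stack alignment.

    #include <cstdio>

    // Rounds a positive value up to a multiple of divisor, in the spirit of
    // WTF::roundUpToMultipleOf.
    static int roundUpToMultipleOfSketch(int divisor, int value)
    {
        return (value + divisor - 1) / divisor * divisor;
    }

    int main()
    {
        const int callFrameHeaderSize = 5;      // placeholder, not the real constant
        const int stackAlignmentRegisters = 2;  // placeholder, not the real constant
        const int numberOfParameters = 2;       // 'this' plus the true return PC
        int registerOffset = -20;               // offset of the last in-use local (example)

        registerOffset -= numberOfParameters;
        registerOffset -= callFrameHeaderSize;
        // Offsets are negative, so align by rounding the magnitude up and negating back.
        registerOffset = -roundUpToMultipleOfSketch(stackAlignmentRegisters, -registerOffset);
        std::printf("registerOffset = %d\n", registerOffset);  // prints -28
    }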
 
-Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
+void ByteCodeParser::emitPutById(
+    Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
 {
-    Node* localBase;
-    if (inlineCallFrame() && !inlineCallFrame()->isClosureCall()) {
-        ASSERT(inlineCallFrame()->callee);
-        localBase = cellConstant(inlineCallFrame()->callee->scope());
-    } else
-        localBase = addToGraph(GetMyScope);
-    if (skipTop) {
-        ASSERT(!inlineCallFrame());
-        localBase = addToGraph(SkipTopScope, localBase);
-    }
-    for (unsigned n = skipCount; n--;)
-        localBase = addToGraph(SkipScope, localBase);
-    return localBase;
+    if (isDirect)
+        addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
+    else
+        addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
 }
 
-bool ByteCodeParser::parseResolveOperations(SpeculatedType prediction, unsigned identifier, ResolveOperations* resolveOperations, PutToBaseOperation* putToBaseOperation, Node** base, Node** value)
+void ByteCodeParser::handlePutById(
+    Node* base, unsigned identifierNumber, Node* value,
+    const PutByIdStatus& putByIdStatus, bool isDirect)
 {
-    if (resolveOperations->isEmpty()) {
-        addToGraph(ForceOSRExit);
-        return false;
+    if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) {
+        if (!putByIdStatus.isSet())
+            addToGraph(ForceOSRExit);
+        emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+        return;
     }
-    JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-    int skipCount = 0;
-    bool skipTop = false;
-    bool skippedScopes = false;
-    bool setBase = false;
-    ResolveOperation* pc = resolveOperations->data();
-    Node* localBase = 0;
-    bool resolvingBase = true;
-    while (resolvingBase) {
-        switch (pc->m_operation) {
-        case ResolveOperation::ReturnGlobalObjectAsBase:
-            *base = cellConstant(globalObject);
-            ASSERT(!value);
-            return true;
-
-        case ResolveOperation::SetBaseToGlobal:
-            *base = cellConstant(globalObject);
-            setBase = true;
-            resolvingBase = false;
-            ++pc;
-            break;
-
-        case ResolveOperation::SetBaseToUndefined:
-            *base = constantUndefined();
-            setBase = true;
-            resolvingBase = false;
-            ++pc;
-            break;
-
-        case ResolveOperation::SetBaseToScope:
-            localBase = getScope(skipTop, skipCount);
-            *base = localBase;
-            setBase = true;
-
-            resolvingBase = false;
-
-            // Reset the scope skipping as we've already loaded it
-            skippedScopes = false;
-            ++pc;
-            break;
-        case ResolveOperation::ReturnScopeAsBase:
-            *base = getScope(skipTop, skipCount);
-            ASSERT(!value);
-            return true;
-
-        case ResolveOperation::SkipTopScopeNode:
-            ASSERT(!inlineCallFrame());
-            skipTop = true;
-            skippedScopes = true;
-            ++pc;
-            break;
-
-        case ResolveOperation::SkipScopes:
-            skipCount += pc->m_scopesToSkip;
-            skippedScopes = true;
-            ++pc;
-            break;
-
-        case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
-            return false;
-
-        case ResolveOperation::Fail:
-            return false;
-
-        default:
-            resolvingBase = false;
+    
+    if (putByIdStatus.numVariants() > 1) {
+        if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
+            || !Options::enablePolymorphicAccessInlining()) {
+            emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+            return;
         }
-    }
-    if (skippedScopes)
-        localBase = getScope(skipTop, skipCount);
-
-    if (base && !setBase)
-        *base = localBase;
-
-    ASSERT(value);
-    ResolveOperation* resolveValueOperation = pc;
-    switch (resolveValueOperation->m_operation) {
-    case ResolveOperation::GetAndReturnGlobalProperty: {
-        ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex, resolveValueOperation, m_codeBlock->identifier(identifier));
-        if (status.isSimple()) {
-            ASSERT(status.structure());
-
-            Node* globalObjectNode = addStructureTransitionCheck(globalObject, status.structure());
-
-            if (status.specificValue()) {
-                ASSERT(status.specificValue().isCell());
-                *value = cellConstant(status.specificValue().asCell());
-            } else
-                *value = handleGetByOffset(prediction, globalObjectNode, identifier, status.offset());
-            return true;
+        
+        if (m_graph.compilation())
+            m_graph.compilation()->noticeInlinedPutById();
+        
+        if (!isDirect) {
+            for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
+                if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
+                    continue;
+                emitChecks(putByIdStatus[variantIndex].constantChecks());
+            }
         }
-
-        Node* resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
-        m_graph.m_resolveGlobalData.append(ResolveGlobalData());
-        ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
-        data.identifierNumber = identifier;
-        data.resolveOperations = resolveOperations;
-        data.putToBaseOperation = putToBaseOperation;
-        data.resolvePropertyIndex = resolveValueOperation - resolveOperations->data();
-        *value = resolve;
-        return true;
+        
+        MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
+        data->variants = putByIdStatus.variants();
+        data->identifierNumber = identifierNumber;
+        addToGraph(MultiPutByOffset, OpInfo(data), base, value);
+        return;
     }
-    case ResolveOperation::GetAndReturnGlobalVar: {
-        *value = addToGraph(
-            GetGlobalVar,
-            OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)),
-            OpInfo(prediction));
-        return true;
+    
+    ASSERT(putByIdStatus.numVariants() == 1);
+    const PutByIdVariant& variant = putByIdStatus[0];
+    
+    switch (variant.kind()) {
+    case PutByIdVariant::Replace: {
+        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
+        handlePutByOffset(base, identifierNumber, variant.offset(), value);
+        if (m_graph.compilation())
+            m_graph.compilation()->noticeInlinedPutById();
+        return;
     }
-    case ResolveOperation::GetAndReturnGlobalVarWatchable: {
-        SpeculatedType prediction = getPrediction();
-
-        JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+    
+    case PutByIdVariant::Transition: {
+        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
+        emitChecks(variant.constantChecks());
 
-        Identifier ident = m_codeBlock->identifier(identifier);
-        SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl());
-        if (!entry.couldBeWatched()) {
-            *value = addToGraph(GetGlobalVar, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(prediction));
-            return true;
-        }
+        ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
+    
+        Node* propertyStorage;
+        Transition* transition = m_graph.m_transitions.add(
+            variant.oldStructureForTransition(), variant.newStructure());
 
-        // The watchpoint is still intact! This means that we will get notified if the
-        // current value in the global variable changes. So, we can inline that value.
-        // Moreover, currently we can assume that this value is a JSFunction*, which
-        // implies that it's a cell. This simplifies things, since in general we'd have
-        // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
-        // of having both cases we just assert that the value is a cell.
+        if (variant.reallocatesStorage()) {
 
-        // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
-        // register pointer. But CSE tracks effects on global variables by comparing
-        // register pointers. Because CSE executes multiple times while the backend
-        // executes once, we use the following performance trade-off:
-        // - The node refers directly to the register pointer to make CSE super cheap.
-        // - To perform backend code generation, the node only contains the identifier
-        //   number, from which it is possible to get (via a few average-time O(1)
-        //   lookups) to the WatchpointSet.
+            // If we're growing the property storage then it must be because we're
+            // storing into the out-of-line storage.
+            ASSERT(!isInlineOffset(variant.offset()));
 
-        addToGraph(GlobalVarWatchpoint, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(identifier));
+            if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
+                propertyStorage = addToGraph(
+                    AllocatePropertyStorage, OpInfo(transition), base);
+            } else {
+                propertyStorage = addToGraph(
+                    ReallocatePropertyStorage, OpInfo(transition),
+                    base, addToGraph(GetButterfly, base));
+            }
+        } else {
+            if (isInlineOffset(variant.offset()))
+                propertyStorage = base;
+            else
+                propertyStorage = addToGraph(GetButterfly, base);
+        }
 
-        JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
-        ASSERT(specificValue.isCell());
-        *value = cellConstant(specificValue.asCell());
-        return true;
-    }
-    case ResolveOperation::GetAndReturnScopedVar: {
-        Node* getScopeRegisters = addToGraph(GetScopeRegisters, localBase);
-        *value = addToGraph(GetScopedVar, OpInfo(resolveValueOperation->m_offset), OpInfo(prediction), getScopeRegisters);
-        return true;
+        StorageAccessData* data = m_graph.m_storageAccessData.add();
+        data->offset = variant.offset();
+        data->identifierNumber = identifierNumber;
+        
+        addToGraph(
+            PutByOffset,
+            OpInfo(data),
+            propertyStorage,
+            base,
+            value);
+
+        // FIXME: PutStructure goes last until we fix either
+        // https://bugs.webkit.org/show_bug.cgi?id=142921 or
+        // https://bugs.webkit.org/show_bug.cgi?id=142924.
+        addToGraph(PutStructure, OpInfo(transition), base);
+
+        if (m_graph.compilation())
+            m_graph.compilation()->noticeInlinedPutById();
+        return;
     }
-    default:
-        CRASH();
-        return false;
+        
+    case PutByIdVariant::Setter: {
+        Node* originalBase = base;
+        
+        addToGraph(
+            CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
+        
+        emitChecks(variant.constantChecks());
+        
+        if (variant.alternateBase())
+            base = weakJSConstant(variant.alternateBase());
+        
+        Node* loadedValue = handleGetByOffset(
+            SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
+            GetGetterSetterByOffset);
+        
+        Node* setter = addToGraph(GetSetter, loadedValue);
+        
+        // Make a call. We don't try to get fancy with using the smallest operand number because
+        // the stack layout phase should compress the stack anyway.
+    
+        unsigned numberOfParameters = 0;
+        numberOfParameters++; // The 'this' argument.
+        numberOfParameters++; // The new value.
+        numberOfParameters++; // True return PC.
+    
+        // Start with a register offset that corresponds to the last in-use register.
+        int registerOffset = virtualRegisterForLocal(
+            m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
+        registerOffset -= numberOfParameters;
+        registerOffset -= JSStack::CallFrameHeaderSize;
+    
+        // Get the alignment right.
+        registerOffset = -WTF::roundUpToMultipleOf(
+            stackAlignmentRegisters(),
+            -registerOffset);
+    
+        ensureLocals(
+            m_inlineStackTop->remapOperand(
+                VirtualRegister(registerOffset)).toLocal());
+    
+        int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
+        set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
+        set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
+    
+        handleCall(
+            VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
+            OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
+            *variant.callLinkStatus(), SpecOther);
+        return;
     }
+    
+    default: {
+        emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+        return;
+    } }
+}
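
The Transition case above chooses between four ways of obtaining property storage before it writes the value and changes the structure. A compact standalone sketch of that decision (not JSC code; the booleans stand in for the structure and offset queries the parser actually makes) follows.

    #include <cstdio>

    enum class StorageSource { BaseInline, ExistingButterfly, FreshAllocation, Reallocation };

    // Mirrors the branch structure of the Transition case: growing the storage means
    // allocating or reallocating out-of-line storage; otherwise the value lands either
    // inline in the object itself or in the butterfly it already has.
    StorageSource chooseStorage(bool reallocatesStorage, bool offsetIsInline, bool hadOutOfLineCapacity)
    {
        if (reallocatesStorage)
            return hadOutOfLineCapacity ? StorageSource::Reallocation : StorageSource::FreshAllocation;
        return offsetIsInline ? StorageSource::BaseInline : StorageSource::ExistingButterfly;
    }

    int main()
    {
        // A transition that adds the first out-of-line property allocates fresh storage.
        std::printf("%d\n", static_cast<int>(chooseStorage(true, false, false))); // prints 2
    }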
+
+void ByteCodeParser::prepareToParseBlock()
+{
+    clearCaches();
+    ASSERT(m_setLocalQueue.isEmpty());
+}
 
+void ByteCodeParser::clearCaches()
+{
+    m_constants.resize(0);
 }
 
 bool ByteCodeParser::parseBlock(unsigned limit)
@@ -1957,13 +2592,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
     // If we are the first basic block, introduce markers for arguments. This allows
     // us to track if a use of an argument may use the actual argument passed, as
     // opposed to using a value we set explicitly.
-    if (m_currentBlock == m_graph.m_blocks[0].get() && !inlineCallFrame()) {
+    if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
         m_graph.m_arguments.resize(m_numArguments);
         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
             VariableAccessData* variable = newVariableAccessData(
-                argumentToOperand(argument), m_codeBlock->isCaptured(argumentToOperand(argument)));
+                virtualRegisterForArgument(argument));
             variable->mergeStructureCheckHoistingFailed(
                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+            variable->mergeCheckArrayHoistingFailed(
+                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
             
             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
             m_graph.m_arguments[argument] = setArgument;
@@ -1972,8 +2609,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
     }
 
     while (true) {
-        m_currentProfilingIndex = m_currentIndex;
-
+        processSetLocalQueue();
+        
         // Don't extend over jump destinations.
         if (m_currentIndex == limit) {
             // Ordinarily we want to plant a jump. But refuse to do this if the block is
@@ -1985,11 +2622,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             // to be true.
             if (!m_currentBlock->isEmpty())
                 addToGraph(Jump, OpInfo(m_currentIndex));
-            else {
-#if DFG_ENABLE(DEBUG_VERBOSE)
-                dataLogF("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
-#endif
-            }
             return shouldContinueParsing;
         }
         
@@ -1998,8 +2630,11 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
         
-        if (m_graph.m_compilation && opcodeID != op_call_put_result) {
-            addToGraph(CountExecution, OpInfo(m_graph.m_compilation->executionCounterFor(
+        if (Options::verboseDFGByteCodeParsing())
+            dataLog("    parsing ", currentCodeOrigin(), "\n");
+        
+        if (m_graph.compilation()) {
+            addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
         }
         
@@ -2007,65 +2642,77 @@ bool ByteCodeParser::parseBlock(unsigned limit)
 
         // === Function entry opcodes ===
 
-        case op_enter:
+        case op_enter: {
+            Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
             // Initialize all locals to undefined.
             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
-                set(i, constantUndefined(), SetOnEntry);
+                set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
             NEXT_OPCODE(op_enter);
-
-        case op_convert_this: {
+        }
+            
+        case op_to_this: {
             Node* op1 = getThis();
-            if (op1->op() != ConvertThis) {
-                ValueProfile* profile =
-                    m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(m_currentProfilingIndex);
-                profile->computeUpdatedPrediction();
-#if DFG_ENABLE(DEBUG_VERBOSE)
-                dataLogF("[bc#%u]: profile %p: ", m_currentProfilingIndex, profile);
-                profile->dump(WTF::dataFile());
-                dataLogF("\n");
-#endif
-                if (profile->m_singletonValueIsTop
-                    || !profile->m_singletonValue
-                    || !profile->m_singletonValue.isCell()
-                    || profile->m_singletonValue.asCell()->classInfo() != &Structure::s_info)
-                    setThis(addToGraph(ConvertThis, op1));
-                else {
+            if (op1->op() != ToThis) {
+                Structure* cachedStructure = currentInstruction[2].u.structure.get();
+                if (currentInstruction[2].u.toThisStatus != ToThisOK
+                    || !cachedStructure
+                    || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
+                    || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
+                    || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+                    || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
+                    setThis(addToGraph(ToThis, op1));
+                } else {
                     addToGraph(
                         CheckStructure,
-                        OpInfo(m_graph.addStructureSet(jsCast<Structure*>(profile->m_singletonValue.asCell()))),
+                        OpInfo(m_graph.addStructureSet(cachedStructure)),
                         op1);
                 }
             }
-            NEXT_OPCODE(op_convert_this);
+            NEXT_OPCODE(op_to_this);
         }
 
         case op_create_this: {
             int calleeOperand = currentInstruction[2].u.operand;
-            Node* callee = get(calleeOperand);
+            Node* callee = get(VirtualRegister(calleeOperand));
+
+            JSFunction* function = callee->dynamicCastConstant<JSFunction*>();
+            if (!function) {
+                JSCell* cachedFunction = currentInstruction[4].u.jsCell.unvalidatedGet();
+                if (cachedFunction
+                    && cachedFunction != JSCell::seenMultipleCalleeObjects()
+                    && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
+                    ASSERT(cachedFunction->inherits(JSFunction::info()));
+
+                    FrozenValue* frozen = m_graph.freeze(cachedFunction);
+                    addToGraph(CheckCell, OpInfo(frozen), callee);
+                    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
+
+                    function = static_cast<JSFunction*>(cachedFunction);
+                }
+            }
+
             bool alreadyEmitted = false;
-            if (callee->op() == WeakJSConstant) {
-                JSCell* cell = callee->weakConstant();
-                ASSERT(cell->inherits(&JSFunction::s_info));
-                
-                JSFunction* function = jsCast<JSFunction*>(cell);
-                ObjectAllocationProfile* allocationProfile = function->tryGetAllocationProfile();
-                if (allocationProfile) {
-                    addToGraph(AllocationProfileWatchpoint, OpInfo(function));
-                    // The callee is still live up to this point.
-                    addToGraph(Phantom, callee);
-                    set(currentInstruction[1].u.operand,
-                        addToGraph(NewObject, OpInfo(allocationProfile->structure())));
-                    alreadyEmitted = true;
+            if (function) {
+                if (FunctionRareData* rareData = function->rareData()) {
+                    if (Structure* structure = rareData->allocationStructure()) {
+                        m_graph.freeze(rareData);
+                        m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
+                        // The callee is still live up to this point.
+                        addToGraph(Phantom, callee);
+                        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
+                        alreadyEmitted = true;
+                    }
                 }
             }
-            if (!alreadyEmitted)
-                set(currentInstruction[1].u.operand,
+            if (!alreadyEmitted) {
+                set(VirtualRegister(currentInstruction[1].u.operand),
                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
+            }
             NEXT_OPCODE(op_create_this);
         }
 
         case op_new_object: {
-            set(currentInstruction[1].u.operand,
+            set(VirtualRegister(currentInstruction[1].u.operand),
                 addToGraph(NewObject,
                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
             NEXT_OPCODE(op_new_object);
@@ -2075,16 +2722,16 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             int startOperand = currentInstruction[2].u.operand;
             int numOperands = currentInstruction[3].u.operand;
             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
-            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
-                addVarArgChild(get(operandIdx));
-            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
+            for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
+                addVarArgChild(get(VirtualRegister(operandIdx)));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
             NEXT_OPCODE(op_new_array);
         }
             
         case op_new_array_with_size: {
             int lengthOperand = currentInstruction[2].u.operand;
             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
-            set(currentInstruction[1].u.operand, addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(lengthOperand)));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
             NEXT_OPCODE(op_new_array_with_size);
         }
             
@@ -2106,239 +2753,223 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             }
             
             m_graph.m_newArrayBufferData.append(data);
-            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
             NEXT_OPCODE(op_new_array_buffer);
         }
             
         case op_new_regexp: {
-            set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
             NEXT_OPCODE(op_new_regexp);
         }
             
-        case op_get_callee: {
-            ValueProfile* profile = currentInstruction[2].u.profile;
-            profile->computeUpdatedPrediction();
-            if (profile->m_singletonValueIsTop
-                || !profile->m_singletonValue
-                || !profile->m_singletonValue.isCell())
-                set(currentInstruction[1].u.operand, get(JSStack::Callee));
-            else {
-                ASSERT(profile->m_singletonValue.asCell()->inherits(&JSFunction::s_info));
-                Node* actualCallee = get(JSStack::Callee);
-                addToGraph(CheckFunction, OpInfo(profile->m_singletonValue.asCell()), actualCallee);
-                set(currentInstruction[1].u.operand, addToGraph(WeakJSConstant, OpInfo(profile->m_singletonValue.asCell())));
-            }
-            NEXT_OPCODE(op_get_callee);
-        }
-
         // === Bitwise operations ===
 
         case op_bitand: {
-            Node* op1 = getToInt32(currentInstruction[2].u.operand);
-            Node* op2 = getToInt32(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
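+            // The operands are no longer eagerly truncated with getToInt32 at parse time;
+            // the int32 conversion is presumably left to later fixup.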
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
             NEXT_OPCODE(op_bitand);
         }
 
         case op_bitor: {
-            Node* op1 = getToInt32(currentInstruction[2].u.operand);
-            Node* op2 = getToInt32(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
             NEXT_OPCODE(op_bitor);
         }
 
         case op_bitxor: {
-            Node* op1 = getToInt32(currentInstruction[2].u.operand);
-            Node* op2 = getToInt32(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
             NEXT_OPCODE(op_bitxor);
         }
 
         case op_rshift: {
-            Node* op1 = getToInt32(currentInstruction[2].u.operand);
-            Node* op2 = getToInt32(currentInstruction[3].u.operand);
-            Node* result;
-            // Optimize out shifts by zero.
-            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
-                result = op1;
-            else
-                result = addToGraph(BitRShift, op1, op2);
-            set(currentInstruction[1].u.operand, result);
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                addToGraph(BitRShift, op1, op2));
             NEXT_OPCODE(op_rshift);
         }
 
         case op_lshift: {
-            Node* op1 = getToInt32(currentInstruction[2].u.operand);
-            Node* op2 = getToInt32(currentInstruction[3].u.operand);
-            Node* result;
-            // Optimize out shifts by zero.
-            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
-                result = op1;
-            else
-                result = addToGraph(BitLShift, op1, op2);
-            set(currentInstruction[1].u.operand, result);
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                addToGraph(BitLShift, op1, op2));
             NEXT_OPCODE(op_lshift);
         }
 
         case op_urshift: {
-            Node* op1 = getToInt32(currentInstruction[2].u.operand);
-            Node* op2 = getToInt32(currentInstruction[3].u.operand);
-            Node* result;
-            // The result of a zero-extending right shift is treated as an unsigned value.
-            // This means that if the top bit is set, the result is not in the int32 range,
-            // and as such must be stored as a double. If the shift amount is a constant,
-            // we may be able to optimize.
-            if (isInt32Constant(op2)) {
-                // If we know we are shifting by a non-zero amount, then since the operation
-                // zero fills we know the top bit of the result must be zero, and as such the
-                // result must be within the int32 range. Conversely, if this is a shift by
-                // zero, then the result may be changed by the conversion to unsigned, but it
-                // is not necessary to perform the shift!
-                if (valueOfInt32Constant(op2) & 0x1f)
-                    result = addToGraph(BitURShift, op1, op2);
-                else
-                    result = makeSafe(addToGraph(UInt32ToNumber, op1));
-            }  else {
-                // Cannot optimize at this stage; shift & potentially rebox as a double.
-                result = addToGraph(BitURShift, op1, op2);
-                result = makeSafe(addToGraph(UInt32ToNumber, result));
-            }
-            set(currentInstruction[1].u.operand, result);
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                addToGraph(BitURShift, op1, op2));
             NEXT_OPCODE(op_urshift);
         }
+            
+        case op_unsigned: {
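+            // The unsigned conversion that op_urshift used to perform inline is now this
+            // separate opcode; makeSafe presumably lets the result become a double when the
+            // shifted value does not fit in an int32.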
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
+            NEXT_OPCODE(op_unsigned);
+        }
 
         // === Increment/Decrement opcodes ===
 
         case op_inc: {
-            unsigned srcDst = currentInstruction[1].u.operand;
-            Node* op = get(srcDst);
-            set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
+            int srcDst = currentInstruction[1].u.operand;
+            VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
+            Node* op = get(srcDstVirtualRegister);
+            set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
             NEXT_OPCODE(op_inc);
         }
 
         case op_dec: {
-            unsigned srcDst = currentInstruction[1].u.operand;
-            Node* op = get(srcDst);
-            set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
+            int srcDst = currentInstruction[1].u.operand;
+            VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
+            Node* op = get(srcDstVirtualRegister);
+            set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
             NEXT_OPCODE(op_dec);
         }
 
         // === Arithmetic operations ===
 
         case op_add: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
             if (op1->hasNumberResult() && op2->hasNumberResult())
-                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
+                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
             else
-                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
+                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
             NEXT_OPCODE(op_add);
         }
 
         case op_sub: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
             NEXT_OPCODE(op_sub);
         }
 
         case op_negate: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
             NEXT_OPCODE(op_negate);
         }
 
         case op_mul: {
             // Multiply requires that the inputs are not truncated, unfortunately.
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
             NEXT_OPCODE(op_mul);
         }
 
         case op_mod: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
             NEXT_OPCODE(op_mod);
         }
 
         case op_div: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
             NEXT_OPCODE(op_div);
         }
 
         // === Misc operations ===
 
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
         case op_debug:
             addToGraph(Breakpoint);
             NEXT_OPCODE(op_debug);
-#endif
+
+        case op_profile_will_call: {
+            addToGraph(ProfileWillCall);
+            NEXT_OPCODE(op_profile_will_call);
+        }
+
+        case op_profile_did_call: {
+            addToGraph(ProfileDidCall);
+            NEXT_OPCODE(op_profile_did_call);
+        }
+
         case op_mov: {
-            Node* op = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, op);
+            Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), op);
             NEXT_OPCODE(op_mov);
         }
 
+        case op_check_tdz: {
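+            // Temporal dead zone check: CheckNotEmpty fails if the binding still holds the
+            // empty value, i.e. it is being read before initialization.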
+            Node* op = get(VirtualRegister(currentInstruction[1].u.operand));
+            addToGraph(CheckNotEmpty, op);
+            NEXT_OPCODE(op_check_tdz);
+        }
+
         case op_check_has_instance:
-            addToGraph(CheckHasInstance, get(currentInstruction[3].u.operand));
+            addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
             NEXT_OPCODE(op_check_has_instance);
 
         case op_instanceof: {
-            Node* value = get(currentInstruction[2].u.operand);
-            Node* prototype = get(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, prototype));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
             NEXT_OPCODE(op_instanceof);
         }
             
         case op_is_undefined: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
             NEXT_OPCODE(op_is_undefined);
         }
 
         case op_is_boolean: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
             NEXT_OPCODE(op_is_boolean);
         }
 
         case op_is_number: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
             NEXT_OPCODE(op_is_number);
         }
 
         case op_is_string: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(IsString, value));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
             NEXT_OPCODE(op_is_string);
         }
 
         case op_is_object: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
             NEXT_OPCODE(op_is_object);
         }
 
+        case op_is_object_or_null: {
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value));
+            NEXT_OPCODE(op_is_object_or_null);
+        }
+
         case op_is_function: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
             NEXT_OPCODE(op_is_function);
         }
 
         case op_not: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
             NEXT_OPCODE(op_not);
         }
             
         case op_to_primitive: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
             NEXT_OPCODE(op_to_primitive);
         }
             
@@ -2352,9 +2983,9 @@ bool ByteCodeParser::parseBlock(unsigned limit)
 #else
             const unsigned maxRopeArguments = 3;
 #endif
-            OwnArrayPtr<Node*> toStringNodes = adoptArrayPtr(new Node*[numOperands]);
+            auto toStringNodes = std::make_unique<Node*[]>(numOperands);
             for (int i = 0; i < numOperands; i++)
-                toStringNodes[i] = addToGraph(ToString, get(startOperand + i));
+                toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
 
             for (int i = 0; i < numOperands; i++)
                 addToGraph(Phantom, toStringNodes[i]);
@@ -2375,182 +3006,110 @@ bool ByteCodeParser::parseBlock(unsigned limit)
                 ASSERT(indexInOperands < maxRopeArguments);
                 operands[indexInOperands++] = toStringNodes[operandIdx];
             }
-            set(currentInstruction[1].u.operand,
+            set(VirtualRegister(currentInstruction[1].u.operand),
                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
             NEXT_OPCODE(op_strcat);
         }
 
         case op_less: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue a = valueOfJSConstant(op1);
-                JSValue b = valueOfJSConstant(op2);
-                if (a.isNumber() && b.isNumber()) {
-                    set(currentInstruction[1].u.operand,
-                        getJSConstantForValue(jsBoolean(a.asNumber() < b.asNumber())));
-                    NEXT_OPCODE(op_less);
-                }
-            }
-            set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
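+            // The parse-time constant folding of numeric comparisons has been dropped here;
+            // presumably later constant-folding phases take care of it.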
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
             NEXT_OPCODE(op_less);
         }
 
         case op_lesseq: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue a = valueOfJSConstant(op1);
-                JSValue b = valueOfJSConstant(op2);
-                if (a.isNumber() && b.isNumber()) {
-                    set(currentInstruction[1].u.operand,
-                        getJSConstantForValue(jsBoolean(a.asNumber() <= b.asNumber())));
-                    NEXT_OPCODE(op_lesseq);
-                }
-            }
-            set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
             NEXT_OPCODE(op_lesseq);
         }
 
         case op_greater: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue a = valueOfJSConstant(op1);
-                JSValue b = valueOfJSConstant(op2);
-                if (a.isNumber() && b.isNumber()) {
-                    set(currentInstruction[1].u.operand,
-                        getJSConstantForValue(jsBoolean(a.asNumber() > b.asNumber())));
-                    NEXT_OPCODE(op_greater);
-                }
-            }
-            set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
             NEXT_OPCODE(op_greater);
         }
 
         case op_greatereq: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue a = valueOfJSConstant(op1);
-                JSValue b = valueOfJSConstant(op2);
-                if (a.isNumber() && b.isNumber()) {
-                    set(currentInstruction[1].u.operand,
-                        getJSConstantForValue(jsBoolean(a.asNumber() >= b.asNumber())));
-                    NEXT_OPCODE(op_greatereq);
-                }
-            }
-            set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
             NEXT_OPCODE(op_greatereq);
         }
 
         case op_eq: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue a = valueOfJSConstant(op1);
-                JSValue b = valueOfJSConstant(op2);
-                set(currentInstruction[1].u.operand,
-                    getJSConstantForValue(jsBoolean(JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
-                NEXT_OPCODE(op_eq);
-            }
-            set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
             NEXT_OPCODE(op_eq);
         }
 
         case op_eq_null: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(CompareEqConstant, value, constantNull()));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
             NEXT_OPCODE(op_eq_null);
         }
 
         case op_stricteq: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue a = valueOfJSConstant(op1);
-                JSValue b = valueOfJSConstant(op2);
-                set(currentInstruction[1].u.operand,
-                    getJSConstantForValue(jsBoolean(JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
-                NEXT_OPCODE(op_stricteq);
-            }
-            if (isConstantForCompareStrictEq(op1))
-                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEqConstant, op2, op1));
-            else if (isConstantForCompareStrictEq(op2))
-                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEqConstant, op1, op2));
-            else
-                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
             NEXT_OPCODE(op_stricteq);
         }
 
         case op_neq: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue a = valueOfJSConstant(op1);
-                JSValue b = valueOfJSConstant(op2);
-                set(currentInstruction[1].u.operand,
-                    getJSConstantForValue(jsBoolean(!JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
-                NEXT_OPCODE(op_neq);
-            }
-            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
             NEXT_OPCODE(op_neq);
         }
 
         case op_neq_null: {
-            Node* value = get(currentInstruction[2].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull())));
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
             NEXT_OPCODE(op_neq_null);
         }
 
         case op_nstricteq: {
-            Node* op1 = get(currentInstruction[2].u.operand);
-            Node* op2 = get(currentInstruction[3].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue a = valueOfJSConstant(op1);
-                JSValue b = valueOfJSConstant(op2);
-                set(currentInstruction[1].u.operand,
-                    getJSConstantForValue(jsBoolean(!JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
-                NEXT_OPCODE(op_nstricteq);
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            Node* invertedResult;
-            if (isConstantForCompareStrictEq(op1))
-                invertedResult = addToGraph(CompareStrictEqConstant, op2, op1);
-            else if (isConstantForCompareStrictEq(op2))
-                invertedResult = addToGraph(CompareStrictEqConstant, op1, op2);
-            else
-                invertedResult = addToGraph(CompareStrictEq, op1, op2);
-            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, invertedResult));
+            Node* invertedResult = addToGraph(CompareStrictEq, op1, op2);
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
             NEXT_OPCODE(op_nstricteq);
         }
 
         // === Property access operations ===
 
         case op_get_by_val: {
-            SpeculatedType prediction = getPrediction();
+            SpeculatedType prediction = getPredictionWithoutOSRExit();
             
-            Node* base = get(currentInstruction[2].u.operand);
-            ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Read, base);
-            Node* property = get(currentInstruction[3].u.operand);
+            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
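+            // Array checks are no longer emitted while parsing; getArrayMode only computes
+            // the mode from the profile.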
+            ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
+            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
-            set(currentInstruction[1].u.operand, getByVal);
+            set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
 
             NEXT_OPCODE(op_get_by_val);
         }
 
+        case op_put_by_val_direct:
         case op_put_by_val: {
-            Node* base = get(currentInstruction[1].u.operand);
+            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
 
-            ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Write, base);
+            ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Write);
             
-            Node* property = get(currentInstruction[2].u.operand);
-            Node* value = get(currentInstruction[3].u.operand);
+            Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
             
             addVarArgChild(base);
             addVarArgChild(property);
             addVarArgChild(value);
             addVarArgChild(0); // Leave room for property storage.
-            addToGraph(Node::VarArg, PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
+            addVarArgChild(0); // Leave room for length.
+            addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
 
             NEXT_OPCODE(op_put_by_val);
         }
@@ -2560,12 +3119,14 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         case op_get_array_length: {
             SpeculatedType prediction = getPrediction();
             
-            Node* base = get(currentInstruction[2].u.operand);
+            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
             
-            Identifier identifier = m_codeBlock->identifier(identifierNumber);
+            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
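+            // The status is now computed from both the baseline block's stub infos and, when
+            // present, the DFG block's, keyed on the current code origin.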
             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
-                m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
+                m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
+                m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
+                currentCodeOrigin(), uid);
             
             handleGetById(
                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
@@ -2578,115 +3139,17 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         case op_put_by_id_transition_normal:
         case op_put_by_id_transition_direct_out_of_line:
         case op_put_by_id_transition_normal_out_of_line: {
-            Node* value = get(currentInstruction[3].u.operand);
-            Node* base = get(currentInstruction[1].u.operand);
+            Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
+            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
             bool direct = currentInstruction[8].u.operand;
 
             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
-                m_inlineStackTop->m_profiledBlock,
-                m_currentIndex,
-                m_codeBlock->identifier(identifierNumber));
-            bool canCountAsInlined = true;
-            if (!putByIdStatus.isSet()) {
-                addToGraph(ForceOSRExit);
-                canCountAsInlined = false;
-            }
-            
-            bool hasExitSite =
-                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
-                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache);
-            
-            if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
-                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
-                Node* propertyStorage;
-                if (isInlineOffset(putByIdStatus.offset()))
-                    propertyStorage = base;
-                else
-                    propertyStorage = addToGraph(GetButterfly, base);
-                addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
-                
-                StorageAccessData storageAccessData;
-                storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
-                storageAccessData.identifierNumber = identifierNumber;
-                m_graph.m_storageAccessData.append(storageAccessData);
-            } else if (!hasExitSite
-                       && putByIdStatus.isSimpleTransition()
-                       && structureChainIsStillValid(
-                           direct,
-                           putByIdStatus.oldStructure(),
-                           putByIdStatus.structureChain())) {
-
-                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
-                if (!direct) {
-                    if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) {
-                        addStructureTransitionCheck(
-                            putByIdStatus.oldStructure()->storedPrototype().asCell());
-                    }
-                    
-                    for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
-                        JSValue prototype = (*it)->storedPrototype();
-                        if (prototype.isNull())
-                            continue;
-                        ASSERT(prototype.isCell());
-                        addStructureTransitionCheck(prototype.asCell());
-                    }
-                }
-                ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
-                
-                Node* propertyStorage;
-                StructureTransitionData* transitionData =
-                    m_graph.addStructureTransitionData(
-                        StructureTransitionData(
-                            putByIdStatus.oldStructure(),
-                            putByIdStatus.newStructure()));
-
-                if (putByIdStatus.oldStructure()->outOfLineCapacity()
-                    != putByIdStatus.newStructure()->outOfLineCapacity()) {
-                    
-                    // If we're growing the property storage then it must be because we're
-                    // storing into the out-of-line storage.
-                    ASSERT(!isInlineOffset(putByIdStatus.offset()));
-                    
-                    if (!putByIdStatus.oldStructure()->outOfLineCapacity()) {
-                        propertyStorage = addToGraph(
-                            AllocatePropertyStorage, OpInfo(transitionData), base);
-                    } else {
-                        propertyStorage = addToGraph(
-                            ReallocatePropertyStorage, OpInfo(transitionData),
-                            base, addToGraph(GetButterfly, base));
-                    }
-                } else {
-                    if (isInlineOffset(putByIdStatus.offset()))
-                        propertyStorage = base;
-                    else
-                        propertyStorage = addToGraph(GetButterfly, base);
-                }
-                
-                addToGraph(PutStructure, OpInfo(transitionData), base);
-                
-                addToGraph(
-                    PutByOffset,
-                    OpInfo(m_graph.m_storageAccessData.size()),
-                    propertyStorage,
-                    base,
-                    value);
-                
-                StorageAccessData storageAccessData;
-                storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
-                storageAccessData.identifierNumber = identifierNumber;
-                m_graph.m_storageAccessData.append(storageAccessData);
-            } else {
-                if (direct)
-                    addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
-                else
-                    addToGraph(PutById, OpInfo(identifierNumber), base, value);
-                canCountAsInlined = false;
-            }
+                m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
+                m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
+                currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
             
-            if (canCountAsInlined && m_graph.m_compilation)
-                m_graph.m_compilation->noticeInlinedPutById();
-
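+            // handlePutById replaces the inline lowering (simple replace, transition, and
+            // generic put) that used to live here.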
+            handlePutById(base, identifierNumber, value, putByIdStatus, direct);
             NEXT_OPCODE(op_put_by_id);
         }
 
@@ -2695,398 +3158,264 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_init_global_const: {
-            Node* value = get(currentInstruction[2].u.operand);
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
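+            // PutGlobalVar now takes the global object as an explicit weak constant child,
+            // presumably so the store can be barriered against its owner cell.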
             addToGraph(
                 PutGlobalVar,
-                OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
-                value);
+                OpInfo(globalObject->assertVariableIsInThisObject(currentInstruction[1].u.variablePointer)),
+                weakJSConstant(globalObject), value);
             NEXT_OPCODE(op_init_global_const);
         }
 
-        case op_init_global_const_check: {
-            Node* value = get(currentInstruction[2].u.operand);
-            CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
-            JSGlobalObject* globalObject = codeBlock->globalObject();
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand];
-            Identifier identifier = m_codeBlock->identifier(identifierNumber);
-            SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
-            if (!entry.couldBeWatched()) {
-                addToGraph(
-                    PutGlobalVar,
-                    OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
-                    value);
-                NEXT_OPCODE(op_init_global_const_check);
-            }
-            addToGraph(
-                PutGlobalVarCheck,
-                OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
-                OpInfo(identifierNumber),
-                value);
-            NEXT_OPCODE(op_init_global_const_check);
+        case op_profile_type: {
+            Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
+            addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
+            NEXT_OPCODE(op_profile_type);
         }
 
+        case op_profile_control_flow: {
+            BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
+            addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
+            NEXT_OPCODE(op_profile_control_flow);
+        }
 
         // === Block terminators. ===
 
         case op_jmp: {
-            unsigned relativeOffset = currentInstruction[1].u.operand;
+            int relativeOffset = currentInstruction[1].u.operand;
             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
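+            // A non-positive offset is a backward (or self) jump; flush state before ending
+            // the block.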
+            if (relativeOffset <= 0)
+                flushForTerminal();
             LAST_OPCODE(op_jmp);
         }
 
         case op_jtrue: {
             unsigned relativeOffset = currentInstruction[2].u.operand;
-            Node* condition = get(currentInstruction[1].u.operand);
-            if (canFold(condition)) {
-                TriState state = valueOfJSConstant(condition).pureToBoolean();
-                if (state == TrueTriState) {
-                    addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                    LAST_OPCODE(op_jtrue);
-                } else if (state == FalseTriState) {
-                    // Emit a placeholder for this bytecode operation but otherwise
-                    // just fall through.
-                    addToGraph(Phantom);
-                    NEXT_OPCODE(op_jtrue);
-                }
-            }
-            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
+            Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
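+            // Branch targets now live in a BranchData (taken / not-taken bytecode indices)
+            // rather than in two separate OpInfo immediates.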
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
             LAST_OPCODE(op_jtrue);
         }
 
         case op_jfalse: {
             unsigned relativeOffset = currentInstruction[2].u.operand;
-            Node* condition = get(currentInstruction[1].u.operand);
-            if (canFold(condition)) {
-                TriState state = valueOfJSConstant(condition).pureToBoolean();
-                if (state == FalseTriState) {
-                    addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                    LAST_OPCODE(op_jfalse);
-                } else if (state == TrueTriState) {
-                    // Emit a placeholder for this bytecode operation but otherwise
-                    // just fall through.
-                    addToGraph(Phantom);
-                    NEXT_OPCODE(op_jfalse);
-                }
-            }
-            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
+            Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jfalse);
         }
 
         case op_jeq_null: {
             unsigned relativeOffset = currentInstruction[2].u.operand;
-            Node* value = get(currentInstruction[1].u.operand);
-            Node* condition = addToGraph(CompareEqConstant, value, constantNull());
-            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
+            Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
             LAST_OPCODE(op_jeq_null);
         }
 
         case op_jneq_null: {
             unsigned relativeOffset = currentInstruction[2].u.operand;
-            Node* value = get(currentInstruction[1].u.operand);
-            Node* condition = addToGraph(CompareEqConstant, value, constantNull());
-            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
+            Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jneq_null);
         }
 
         case op_jless: {
             unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(currentInstruction[1].u.operand);
-            Node* op2 = get(currentInstruction[2].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue aValue = valueOfJSConstant(op1);
-                JSValue bValue = valueOfJSConstant(op2);
-                if (aValue.isNumber() && bValue.isNumber()) {
-                    double a = aValue.asNumber();
-                    double b = bValue.asNumber();
-                    if (a < b) {
-                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                        LAST_OPCODE(op_jless);
-                    } else {
-                        // Emit a placeholder for this bytecode operation but otherwise
-                        // just fall through.
-                        addToGraph(Phantom);
-                        NEXT_OPCODE(op_jless);
-                    }
-                }
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
             Node* condition = addToGraph(CompareLess, op1, op2);
-            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
             LAST_OPCODE(op_jless);
         }
 
         case op_jlesseq: {
             unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(currentInstruction[1].u.operand);
-            Node* op2 = get(currentInstruction[2].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue aValue = valueOfJSConstant(op1);
-                JSValue bValue = valueOfJSConstant(op2);
-                if (aValue.isNumber() && bValue.isNumber()) {
-                    double a = aValue.asNumber();
-                    double b = bValue.asNumber();
-                    if (a <= b) {
-                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                        LAST_OPCODE(op_jlesseq);
-                    } else {
-                        // Emit a placeholder for this bytecode operation but otherwise
-                        // just fall through.
-                        addToGraph(Phantom);
-                        NEXT_OPCODE(op_jlesseq);
-                    }
-                }
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
             Node* condition = addToGraph(CompareLessEq, op1, op2);
-            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
             LAST_OPCODE(op_jlesseq);
         }
 
         case op_jgreater: {
             unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(currentInstruction[1].u.operand);
-            Node* op2 = get(currentInstruction[2].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue aValue = valueOfJSConstant(op1);
-                JSValue bValue = valueOfJSConstant(op2);
-                if (aValue.isNumber() && bValue.isNumber()) {
-                    double a = aValue.asNumber();
-                    double b = bValue.asNumber();
-                    if (a > b) {
-                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                        LAST_OPCODE(op_jgreater);
-                    } else {
-                        // Emit a placeholder for this bytecode operation but otherwise
-                        // just fall through.
-                        addToGraph(Phantom);
-                        NEXT_OPCODE(op_jgreater);
-                    }
-                }
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
             Node* condition = addToGraph(CompareGreater, op1, op2);
-            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
             LAST_OPCODE(op_jgreater);
         }
 
         case op_jgreatereq: {
             unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(currentInstruction[1].u.operand);
-            Node* op2 = get(currentInstruction[2].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue aValue = valueOfJSConstant(op1);
-                JSValue bValue = valueOfJSConstant(op2);
-                if (aValue.isNumber() && bValue.isNumber()) {
-                    double a = aValue.asNumber();
-                    double b = bValue.asNumber();
-                    if (a >= b) {
-                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                        LAST_OPCODE(op_jgreatereq);
-                    } else {
-                        // Emit a placeholder for this bytecode operation but otherwise
-                        // just fall through.
-                        addToGraph(Phantom);
-                        NEXT_OPCODE(op_jgreatereq);
-                    }
-                }
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
-            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
             LAST_OPCODE(op_jgreatereq);
         }
 
         case op_jnless: {
             unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(currentInstruction[1].u.operand);
-            Node* op2 = get(currentInstruction[2].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue aValue = valueOfJSConstant(op1);
-                JSValue bValue = valueOfJSConstant(op2);
-                if (aValue.isNumber() && bValue.isNumber()) {
-                    double a = aValue.asNumber();
-                    double b = bValue.asNumber();
-                    if (a < b) {
-                        // Emit a placeholder for this bytecode operation but otherwise
-                        // just fall through.
-                        addToGraph(Phantom);
-                        NEXT_OPCODE(op_jnless);
-                    } else {
-                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                        LAST_OPCODE(op_jnless);
-                    }
-                }
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
             Node* condition = addToGraph(CompareLess, op1, op2);
-            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jnless);
         }
 
         case op_jnlesseq: {
             unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(currentInstruction[1].u.operand);
-            Node* op2 = get(currentInstruction[2].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue aValue = valueOfJSConstant(op1);
-                JSValue bValue = valueOfJSConstant(op2);
-                if (aValue.isNumber() && bValue.isNumber()) {
-                    double a = aValue.asNumber();
-                    double b = bValue.asNumber();
-                    if (a <= b) {
-                        // Emit a placeholder for this bytecode operation but otherwise
-                        // just fall through.
-                        addToGraph(Phantom);
-                        NEXT_OPCODE(op_jnlesseq);
-                    } else {
-                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                        LAST_OPCODE(op_jnlesseq);
-                    }
-                }
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
             Node* condition = addToGraph(CompareLessEq, op1, op2);
-            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jnlesseq);
         }
 
         case op_jngreater: {
             unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(currentInstruction[1].u.operand);
-            Node* op2 = get(currentInstruction[2].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue aValue = valueOfJSConstant(op1);
-                JSValue bValue = valueOfJSConstant(op2);
-                if (aValue.isNumber() && bValue.isNumber()) {
-                    double a = aValue.asNumber();
-                    double b = bValue.asNumber();
-                    if (a > b) {
-                        // Emit a placeholder for this bytecode operation but otherwise
-                        // just fall through.
-                        addToGraph(Phantom);
-                        NEXT_OPCODE(op_jngreater);
-                    } else {
-                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                        LAST_OPCODE(op_jngreater);
-                    }
-                }
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
             Node* condition = addToGraph(CompareGreater, op1, op2);
-            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jngreater);
         }
 
         case op_jngreatereq: {
             unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(currentInstruction[1].u.operand);
-            Node* op2 = get(currentInstruction[2].u.operand);
-            if (canFold(op1) && canFold(op2)) {
-                JSValue aValue = valueOfJSConstant(op1);
-                JSValue bValue = valueOfJSConstant(op2);
-                if (aValue.isNumber() && bValue.isNumber()) {
-                    double a = aValue.asNumber();
-                    double b = bValue.asNumber();
-                    if (a >= b) {
-                        // Emit a placeholder for this bytecode operation but otherwise
-                        // just fall through.
-                        addToGraph(Phantom);
-                        NEXT_OPCODE(op_jngreatereq);
-                    } else {
-                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
-                        LAST_OPCODE(op_jngreatereq);
-                    }
-                }
-            }
+            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
-            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jngreatereq);
         }
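
The three fused compare-and-branch opcodes above now hang both successor bytecode offsets off a single BranchData record, which gets resolved to real blocks later during linking. A minimal self-contained sketch of that pattern follows; BranchTarget, BranchData, and the arena are simplified stand-ins, not JSC's real classes.

    #include <deque>

    struct BranchTarget {
        unsigned bytecodeIndex;   // known while parsing
        int blockIndex;           // resolved later, during linking (-1 = unlinked)
    };

    struct BranchData {
        BranchTarget taken;
        BranchTarget notTaken;
    };

    // The graph owns all BranchData records; a deque keeps pointers stable as
    // more records are added, so nodes can hold plain BranchData*.
    struct BranchDataArena {
        std::deque<BranchData> storage;

        BranchData* add(unsigned takenIndex, unsigned notTakenIndex)
        {
            storage.push_back(BranchData { { takenIndex, -1 }, { notTakenIndex, -1 } });
            return &storage.back();
        }
    };

In the parser, the Branch node then carries one OpInfo pointing at such a record instead of two raw offsets.
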
+            
+        case op_switch_imm: {
+            SwitchData& data = *m_graph.m_switchData.add();
+            data.kind = SwitchImm;
+            data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
+            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+            SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
+            for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
+                if (!table.branchOffsets[i])
+                    continue;
+                unsigned target = m_currentIndex + table.branchOffsets[i];
+                if (target == data.fallThrough.bytecodeIndex())
+                    continue;
+                data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
+            }
+            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+            flushIfTerminal(data);
+            LAST_OPCODE(op_switch_imm);
+        }
+            
+        case op_switch_char: {
+            SwitchData& data = *m_graph.m_switchData.add();
+            data.kind = SwitchChar;
+            data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
+            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+            SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
+            for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
+                if (!table.branchOffsets[i])
+                    continue;
+                unsigned target = m_currentIndex + table.branchOffsets[i];
+                if (target == data.fallThrough.bytecodeIndex())
+                    continue;
+                data.cases.append(
+                    SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
+            }
+            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+            flushIfTerminal(data);
+            LAST_OPCODE(op_switch_char);
+        }
+
+        case op_switch_string: {
+            SwitchData& data = *m_graph.m_switchData.add();
+            data.kind = SwitchString;
+            data.switchTableIndex = currentInstruction[1].u.operand;
+            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+            StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
+            StringJumpTable::StringOffsetTable::iterator iter;
+            StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
+            for (iter = table.offsetTable.begin(); iter != end; ++iter) {
+                unsigned target = m_currentIndex + iter->value.branchOffset;
+                if (target == data.fallThrough.bytecodeIndex())
+                    continue;
+                data.cases.append(
+                    SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
+            }
+            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+            flushIfTerminal(data);
+            LAST_OPCODE(op_switch_string);
+        }
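
All three switch forms above build their case lists the same way: walk the baseline jump table, skip empty entries, and drop any case whose target coincides with the fall-through so the Switch node only names distinct successors. A minimal sketch of that shape for the immediate case, using simplified stand-in types:

    #include <cstdint>
    #include <vector>

    struct Case {
        int32_t value;
        unsigned targetBytecodeIndex;
    };

    struct SwitchPlan {
        unsigned fallThrough;
        std::vector<Case> cases;
    };

    // A zero branch offset means "use the default"; targets equal to the
    // fall-through are dropped, mirroring the loops in the diff above.
    SwitchPlan buildImmediateSwitch(
        unsigned currentIndex, unsigned defaultOffset,
        int32_t tableMin, const std::vector<int32_t>& branchOffsets)
    {
        SwitchPlan plan;
        plan.fallThrough = currentIndex + defaultOffset;
        for (size_t i = 0; i < branchOffsets.size(); ++i) {
            if (!branchOffsets[i])
                continue;
            unsigned target = currentIndex + branchOffsets[i];
            if (target == plan.fallThrough)
                continue;
            plan.cases.push_back({ static_cast<int32_t>(tableMin + i), target });
        }
        return plan;
    }
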
 
         case op_ret:
-            flushArgumentsAndCapturedVariables();
             if (inlineCallFrame()) {
-                if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
-                    setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
+                flushForReturn();
+                if (m_inlineStackTop->m_returnValue.isValid())
+                    setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
                 m_inlineStackTop->m_didReturn = true;
                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
                     // If we're returning from the first block, then we're done parsing.
-                    ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
+                    ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
                     shouldContinueParsing = false;
                     LAST_OPCODE(op_ret);
                 } else {
                     // If inlining created blocks, and we're doing a return, then we need some
                     // special linking.
-                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
+                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
                 }
                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
-                    addToGraph(Jump, OpInfo(NoBlock));
+                    addToGraph(Jump, OpInfo(0));
                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
                     m_inlineStackTop->m_didEarlyReturn = true;
                 }
                 LAST_OPCODE(op_ret);
             }
-            addToGraph(Return, get(currentInstruction[1].u.operand));
+            addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
+            flushForReturn();
             LAST_OPCODE(op_ret);
             
         case op_end:
-            flushArgumentsAndCapturedVariables();
             ASSERT(!inlineCallFrame());
-            addToGraph(Return, get(currentInstruction[1].u.operand));
+            addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
+            flushForReturn();
             LAST_OPCODE(op_end);
 
         case op_throw:
-            addToGraph(Throw, get(currentInstruction[1].u.operand));
-            flushAllArgumentsAndCapturedVariablesInInlineStack();
+            addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
+            flushForTerminal();
             addToGraph(Unreachable);
             LAST_OPCODE(op_throw);
             
         case op_throw_static_error:
             addToGraph(ThrowReferenceError);
-            flushAllArgumentsAndCapturedVariablesInInlineStack();
+            flushForTerminal();
             addToGraph(Unreachable);
             LAST_OPCODE(op_throw_static_error);
             
         case op_call:
-            handleCall(interpreter, currentInstruction, Call, CodeForCall);
+            handleCall(currentInstruction, Call, CodeForCall);
+            // Verify that handleCall(), which could have inlined the callee, didn't trash m_currentInstruction
+            ASSERT(m_currentInstruction == currentInstruction);
             NEXT_OPCODE(op_call);
             
         case op_construct:
-            handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
+            handleCall(currentInstruction, Construct, CodeForConstruct);
             NEXT_OPCODE(op_construct);
             
         case op_call_varargs: {
-            ASSERT(inlineCallFrame());
-            ASSERT(currentInstruction[3].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
-            ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
-            // It would be cool to funnel this into handleCall() so that it can handle
-            // inlining. But currently that won't be profitable anyway, since none of the
-            // uses of call_varargs will be inlineable. So we set this up manually and
-            // without inline/intrinsic detection.
-            
-            Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call_varargs);
-            
-            SpeculatedType prediction = SpecNone;
-            if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
-                m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call_varargs);
-                prediction = getPrediction();
-            }
-            
-            addToGraph(CheckArgumentsNotCreated);
-            
-            unsigned argCount = inlineCallFrame()->arguments.size();
-            if (JSStack::CallFrameHeaderSize + argCount > m_parameterSlots)
-                m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
-            
-            addVarArgChild(get(currentInstruction[1].u.operand)); // callee
-            addVarArgChild(get(currentInstruction[2].u.operand)); // this
-            for (unsigned argument = 1; argument < argCount; ++argument)
-                addVarArgChild(get(argumentToOperand(argument)));
-            
-            Node* call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));
-            if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
-                set(putInstruction[1].u.operand, call);
-            
+            handleVarargsCall(currentInstruction, CallVarargs, CodeForCall);
             NEXT_OPCODE(op_call_varargs);
         }
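
The removed code above also showed the outgoing-stack accounting that handleVarargsCall() is assumed to take over: every call site must reserve at least a frame header plus its argument count worth of parameter slots. A tiny sketch of that bookkeeping, with kCallFrameHeaderSize as a hypothetical stand-in constant:

    #include <algorithm>

    constexpr unsigned kCallFrameHeaderSize = 5; // stand-in; JSC defines its own value

    // Mirrors: if (JSStack::CallFrameHeaderSize + argCount > m_parameterSlots) m_parameterSlots = ...
    unsigned updateParameterSlots(unsigned currentSlots, unsigned argumentCountIncludingThis)
    {
        return std::max(currentSlots, kCallFrameHeaderSize + argumentCountIncludingThis);
    }
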
             
-        case op_call_put_result:
-            NEXT_OPCODE(op_call_put_result);
+        case op_construct_varargs: {
+            handleVarargsCall(currentInstruction, ConstructVarargs, CodeForConstruct);
+            NEXT_OPCODE(op_construct_varargs);
+        }
             
         case op_jneq_ptr:
             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
@@ -3094,201 +3423,290 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             // already gnarly enough as it is.
             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
             addToGraph(
-                CheckFunction,
-                OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
-                get(currentInstruction[1].u.operand));
+                CheckCell,
+                OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
+                    m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
+                get(VirtualRegister(currentInstruction[1].u.operand)));
             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
             LAST_OPCODE(op_jneq_ptr);
 
-        case op_get_scoped_var: {
-            SpeculatedType prediction = getPrediction();
+        case op_resolve_scope: {
             int dst = currentInstruction[1].u.operand;
-            int slot = currentInstruction[2].u.operand;
-            int depth = currentInstruction[3].u.operand;
-            bool hasTopScope = m_codeBlock->codeType() == FunctionCode && m_inlineStackTop->m_codeBlock->needsFullScopeChain();
-            ASSERT(!hasTopScope || depth >= 1);
-            Node* scope = getScope(hasTopScope, depth - hasTopScope);
-            Node* getScopeRegisters = addToGraph(GetScopeRegisters, scope);
-            Node* getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeRegisters);
-            set(dst, getScopedVar);
-            NEXT_OPCODE(op_get_scoped_var);
-        }
-
-        case op_put_scoped_var: {
-            int slot = currentInstruction[1].u.operand;
-            int depth = currentInstruction[2].u.operand;
-            int source = currentInstruction[3].u.operand;
-            bool hasTopScope = m_codeBlock->codeType() == FunctionCode && m_inlineStackTop->m_codeBlock->needsFullScopeChain();
-            ASSERT(!hasTopScope || depth >= 1);
-            Node* scope = getScope(hasTopScope, depth - hasTopScope);
-            Node* scopeRegisters = addToGraph(GetScopeRegisters, scope);
-            addToGraph(PutScopedVar, OpInfo(slot), scope, scopeRegisters, get(source));
-            NEXT_OPCODE(op_put_scoped_var);
-        }
-
-        case op_resolve:
-        case op_resolve_global_property:
-        case op_resolve_global_var:
-        case op_resolve_scoped_var:
-        case op_resolve_scoped_var_on_top_scope:
-        case op_resolve_scoped_var_with_top_scope_check: {
-            SpeculatedType prediction = getPrediction();
-            
-            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
-            ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
-            Node* value = 0;
-            if (parseResolveOperations(prediction, identifier, operations, 0, 0, &value)) {
-                set(currentInstruction[1].u.operand, value);
-                NEXT_OPCODE(op_resolve);
+            ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
+            unsigned depth = currentInstruction[5].u.operand;
+
+            // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
+            if (needsVarInjectionChecks(resolveType))
+                addToGraph(VarInjectionWatchpoint);
+
+            switch (resolveType) {
+            case GlobalProperty:
+            case GlobalVar:
+            case GlobalPropertyWithVarInjectionChecks:
+            case GlobalVarWithVarInjectionChecks:
+                set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
+                if (resolveType == GlobalPropertyWithVarInjectionChecks || resolveType == GlobalVarWithVarInjectionChecks)
+                    addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand))));
+                break;
+            case LocalClosureVar:
+            case ClosureVar:
+            case ClosureVarWithVarInjectionChecks: {
+                Node* localBase = get(VirtualRegister(currentInstruction[2].u.operand));
+                addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
+                
+                // We have various forms of constant folding here. This is necessary to avoid
+                // spurious recompiles in dead-but-foldable code.
+                if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) {
+                    InferredValue* singleton = symbolTable->singletonScope();
+                    if (JSValue value = singleton->inferredValue()) {
+                        m_graph.watchpoints().addLazily(singleton);
+                        set(VirtualRegister(dst), weakJSConstant(value));
+                        break;
+                    }
+                }
+                if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>()) {
+                    for (unsigned n = depth; n--;)
+                        scope = scope->next();
+                    set(VirtualRegister(dst), weakJSConstant(scope));
+                    break;
+                }
+                for (unsigned n = depth; n--;)
+                    localBase = addToGraph(SkipScope, localBase);
+                set(VirtualRegister(dst), localBase);
+                break;
             }
-
-            Node* resolve = addToGraph(Resolve, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
-            m_graph.m_resolveOperationsData.append(ResolveOperationData());
-            ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
-            data.identifierNumber = identifier;
-            data.resolveOperations = operations;
-
-            set(currentInstruction[1].u.operand, resolve);
-
-            NEXT_OPCODE(op_resolve);
+            case Dynamic:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+            NEXT_OPCODE(op_resolve_scope);
         }
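
The ClosureVar branch above folds the whole scope walk away whenever the base scope is a compile-time constant; otherwise it emits one SkipScope node per hop. A minimal stand-in sketch of the constant-folded walk (Scope below is not JSC's JSScope):

    struct Scope {
        Scope* parent;
        Scope* next() const { return parent; }
    };

    // Hop 'depth' links up the scope chain at compile time, which is exactly
    // what the parser does when the base is already a known constant.
    Scope* resolveStatically(Scope* base, unsigned depth)
    {
        for (unsigned n = depth; n--;)
            base = base->next();
        return base;
    }
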
 
-        case op_put_to_base_variable:
-        case op_put_to_base: {
-            unsigned base = currentInstruction[1].u.operand;
-            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
-            unsigned value = currentInstruction[3].u.operand;
-            PutToBaseOperation* putToBase = currentInstruction[4].u.putToBaseOperation;
-
-            if (putToBase->m_isDynamic) {
-                addToGraph(PutById, OpInfo(identifier), get(base), get(value));
-                NEXT_OPCODE(op_put_to_base);
+        case op_get_from_scope: {
+            int dst = currentInstruction[1].u.operand;
+            int scope = currentInstruction[2].u.operand;
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+            ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+
+            Structure* structure = 0;
+            WatchpointSet* watchpoints = 0;
+            uintptr_t operand;
+            {
+                ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+                if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+                    watchpoints = currentInstruction[5].u.watchpointSet;
+                else
+                    structure = currentInstruction[5].u.structure.get();
+                operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
             }
 
-            switch (putToBase->m_kind) {
-            case PutToBaseOperation::Uninitialised:
-                addToGraph(ForceOSRExit);
-                addToGraph(Phantom, get(base));
-                addToGraph(Phantom, get(value));
-                break;
+            UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it only to document that index 5 holds the watchpoint set in GlobalVar mode.
+
+            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
 
-            case PutToBaseOperation::GlobalVariablePutChecked: {
-                CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
-                JSGlobalObject* globalObject = codeBlock->globalObject();
-                SymbolTableEntry entry = globalObject->symbolTable()->get(m_codeBlock->identifier(identifier).impl());
-                if (entry.couldBeWatched()) {
-                    addToGraph(PutGlobalVarCheck,
-                               OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
-                               OpInfo(identifier),
-                               get(value));
+            switch (resolveType) {
+            case GlobalProperty:
+            case GlobalPropertyWithVarInjectionChecks: {
+                SpeculatedType prediction = getPrediction();
+                GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
+                if (status.state() != GetByIdStatus::Simple
+                    || status.numVariants() != 1
+                    || status[0].structureSet().size() != 1) {
+                    set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
                     break;
                 }
-            }
-            case PutToBaseOperation::GlobalVariablePut:
-                addToGraph(PutGlobalVar,
-                           OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
-                           get(value));
+                Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
+                addToGraph(Phantom, get(VirtualRegister(scope)));
+                set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
                 break;
-            case PutToBaseOperation::VariablePut: {
-                Node* scope = get(base);
-                Node* scopeRegisters = addToGraph(GetScopeRegisters, scope);
-                addToGraph(PutScopedVar, OpInfo(putToBase->m_offset), scope, scopeRegisters, get(value));
+            }
+            case GlobalVar:
+            case GlobalVarWithVarInjectionChecks: {
+                addToGraph(Phantom, get(VirtualRegister(scope)));
+                WatchpointSet* watchpointSet;
+                ScopeOffset offset;
+                {
+                    ConcurrentJITLocker locker(globalObject->symbolTable()->m_lock);
+                    SymbolTableEntry entry = globalObject->symbolTable()->get(locker, uid);
+                    watchpointSet = entry.watchpointSet();
+                    offset = entry.scopeOffset();
+                }
+                if (watchpointSet && watchpointSet->state() == IsWatched) {
+                    // This has a fun concurrency story. There is the possibility of a race in two
+                    // directions:
+                    //
+                    // We see that the set IsWatched, but in the meantime it gets invalidated: this is
+                    // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
+                    // invalidated, then this compilation is invalidated. Note that in the meantime we
+                    // may load an absurd value from the global object. It's fine to load an absurd
+                    // value if the compilation is invalidated anyway.
+                    //
+                    // We see that the set IsWatched, but the value isn't yet initialized: this isn't
+                    // possible because of the ordering of operations.
+                    //
+                    // Here's how we order operations:
+                    //
+                    // Main thread stores to the global object: always store a value first, and only
+                    // after that do we touch the watchpoint set. There is a fence in the touch, that
+                    // ensures that the store to the global object always happens before the touch on the
+                    // set.
+                    //
+                    // Compilation thread: always first load the state of the watchpoint set, and then
+                    // load the value. The WatchpointSet::state() method does fences for us to ensure
+                    // that the load of the state happens before our load of the value.
+                    //
+                    // Finalizing compilation: this happens on the main thread and synchronously checks
+                    // validity of all watchpoint sets.
+                    //
+                    // We will only perform optimizations if the load of the state yields IsWatched. That
+                    // means that at least one store would have happened to initialize the original value
+                    // of the variable (that is, the value we'd like to constant fold to). There may be
+                    // other stores that happen after that, but those stores will invalidate the
+                    // watchpoint set and also the compilation.
+                    
+                    // Note that we need to use the operand, which is a direct pointer to the global,
+                    // rather than looking up the global by doing variableAt(offset). That's because the
+                    // internal data structures of JSSegmentedVariableObject are not thread-safe even
+                    // though accessing the global itself is. The segmentation involves a vector spine
+                    // that resizes with malloc/free, so if new globals unrelated to the one we are
+                    // reading are added, we might access freed memory if we do variableAt().
+                    WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand);
+                    
+                    ASSERT(globalObject->findVariableIndex(pointer) == offset);
+                    
+                    JSValue value = pointer->get();
+                    if (value) {
+                        m_graph.watchpoints().addLazily(watchpointSet);
+                        set(VirtualRegister(dst), weakJSConstant(value));
+                        break;
+                    }
+                }
+                
+                SpeculatedType prediction = getPrediction();
+                set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
                 break;
             }
-            case PutToBaseOperation::GlobalPropertyPut: {
-                if (!putToBase->m_structure) {
-                    addToGraph(ForceOSRExit);
-                    addToGraph(Phantom, get(base));
-                    addToGraph(Phantom, get(value));
-                    NEXT_OPCODE(op_put_to_base);
+            case LocalClosureVar:
+            case ClosureVar:
+            case ClosureVarWithVarInjectionChecks: {
+                Node* scopeNode = get(VirtualRegister(scope));
+                
+                // Ideally we wouldn't have to do this Phantom. But:
+                //
+                // For the constant case: we must do it because otherwise we would have no way of knowing
+                // that the scope is live at OSR here.
+                //
+                // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
+                // won't be able to handle an Undefined scope.
+                addToGraph(Phantom, scopeNode);
+                
+                // Constant folding in the bytecode parser is important for performance. This op may
+                // not have executed yet, in which case we won't have a prediction. Lacking a
+                // prediction, we'd otherwise think that it has to exit; then, once it did execute, we
+                // would recompile. But if we can fold it here, we avoid the exit.
+                if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
+                    set(VirtualRegister(dst), weakJSConstant(value));
+                    break;
                 }
-                Node* baseNode = get(base);
-                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putToBase->m_structure.get())), baseNode);
-                Node* propertyStorage;
-                if (isInlineOffset(putToBase->m_offset))
-                    propertyStorage = baseNode;
-                else
-                    propertyStorage = addToGraph(GetButterfly, baseNode);
-                addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, baseNode, get(value));
-
-                StorageAccessData storageAccessData;
-                storageAccessData.offset = indexRelativeToBase(putToBase->m_offset);
-                storageAccessData.identifierNumber = identifier;
-                m_graph.m_storageAccessData.append(storageAccessData);
+                SpeculatedType prediction = getPrediction();
+                set(VirtualRegister(dst),
+                    addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
                 break;
             }
-            case PutToBaseOperation::Readonly:
-            case PutToBaseOperation::Generic:
-                addToGraph(PutById, OpInfo(identifier), get(base), get(value));
+            case Dynamic:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
             }
-            NEXT_OPCODE(op_put_to_base);
+            NEXT_OPCODE(op_get_from_scope);
         }
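
The GlobalVar constant folding above leans on the ordering spelled out in the long comment: writers store the value before touching the watchpoint set, and the compiler thread reads the set's state before reading the value. A simplified, self-contained model of that ordering with std::atomic (GlobalSlot and the enum are stand-ins, not JSC types):

    #include <atomic>
    #include <cstdint>

    enum WatchState { ClearWatchpoint, IsWatched, IsInvalidated };

    struct GlobalSlot {
        std::atomic<int> state { ClearWatchpoint };
        std::atomic<uint64_t> value { 0 };
    };

    // Main thread, first definition: store the value, then mark the set watched.
    void defineGlobal(GlobalSlot& g, uint64_t v)
    {
        g.value.store(v, std::memory_order_relaxed);
        g.state.store(IsWatched, std::memory_order_release);
    }

    // Main thread, later redefinition: store the new value, then invalidate.
    void redefineGlobal(GlobalSlot& g, uint64_t v)
    {
        g.value.store(v, std::memory_order_relaxed);
        g.state.store(IsInvalidated, std::memory_order_release);
    }

    // Compiler thread: only if the state loads as IsWatched is the value read
    // afterwards guaranteed to be an initialized one; a racing redefinition is
    // harmless because it also invalidates the set, and with it the compilation.
    bool tryFoldGlobal(GlobalSlot& g, uint64_t& folded)
    {
        if (g.state.load(std::memory_order_acquire) != IsWatched)
            return false;
        folded = g.value.load(std::memory_order_relaxed);
        return true;
    }
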
 
-        case op_resolve_base_to_global:
-        case op_resolve_base_to_global_dynamic:
-        case op_resolve_base_to_scope:
-        case op_resolve_base_to_scope_with_top_scope_check:
-        case op_resolve_base: {
-            SpeculatedType prediction = getPrediction();
+        case op_put_to_scope: {
+            unsigned scope = currentInstruction[1].u.operand;
+            unsigned identifierNumber = currentInstruction[2].u.operand;
+            if (identifierNumber != UINT_MAX)
+                identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
+            unsigned value = currentInstruction[3].u.operand;
+            ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+            UniquedStringImpl* uid;
+            if (identifierNumber != UINT_MAX)
+                uid = m_graph.identifiers()[identifierNumber];
+            else
+                uid = nullptr;
             
-            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
-            ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
-            PutToBaseOperation* putToBaseOperation = currentInstruction[5].u.putToBaseOperation;
-
-            Node* base = 0;
-            if (parseResolveOperations(prediction, identifier, operations, 0, &base, 0)) {
-                set(currentInstruction[1].u.operand, base);
-                NEXT_OPCODE(op_resolve_base);
+            Structure* structure = nullptr;
+            WatchpointSet* watchpoints = nullptr;
+            uintptr_t operand;
+            {
+                ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+                if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar)
+                    watchpoints = currentInstruction[5].u.watchpointSet;
+                else
+                    structure = currentInstruction[5].u.structure.get();
+                operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
             }
 
-            Node* resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
-            m_graph.m_resolveOperationsData.append(ResolveOperationData());
-            ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
-            data.identifierNumber = identifier;
-            data.resolveOperations = operations;
-            data.putToBaseOperation = putToBaseOperation;
-        
-            set(currentInstruction[1].u.operand, resolve);
+            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
 
-            NEXT_OPCODE(op_resolve_base);
-        }
-        case op_resolve_with_base: {
-            SpeculatedType prediction = getPrediction();
-            unsigned baseDst = currentInstruction[1].u.operand;
-            unsigned valueDst = currentInstruction[2].u.operand;
-            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
-            ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
-            PutToBaseOperation* putToBaseOperation = currentInstruction[5].u.putToBaseOperation;
-
-            Node* base = 0;
-            Node* value = 0;
-            if (parseResolveOperations(prediction, identifier, operations, putToBaseOperation, &base, &value))
-                setPair(baseDst, base, valueDst, value);
-            else {
-                addToGraph(ForceOSRExit);
-                setPair(baseDst, addToGraph(GarbageValue), valueDst, addToGraph(GarbageValue));
+            switch (resolveType) {
+            case GlobalProperty:
+            case GlobalPropertyWithVarInjectionChecks: {
+                PutByIdStatus status;
+                if (uid)
+                    status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
+                else
+                    status = PutByIdStatus(PutByIdStatus::TakesSlowPath);
+                if (status.numVariants() != 1
+                    || status[0].kind() != PutByIdVariant::Replace
+                    || status[0].structure().size() != 1) {
+                    addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
+                    break;
+                }
+                ASSERT(status[0].structure().onlyStructure() == structure);
+                Node* base = cellConstantWithStructureCheck(globalObject, structure);
+                addToGraph(Phantom, get(VirtualRegister(scope)));
+                handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
+                // Keep scope alive until after put.
+                addToGraph(Phantom, get(VirtualRegister(scope)));
+                break;
             }
-
-            NEXT_OPCODE(op_resolve_with_base);
-        }
-        case op_resolve_with_this: {
-            SpeculatedType prediction = getPrediction();
-            unsigned baseDst = currentInstruction[1].u.operand;
-            unsigned valueDst = currentInstruction[2].u.operand;
-            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
-            ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
-
-            Node* base = 0;
-            Node* value = 0;
-            if (parseResolveOperations(prediction, identifier, operations, 0, &base, &value))
-                setPair(baseDst, base, valueDst, value);
-            else {
-                addToGraph(ForceOSRExit);
-                setPair(baseDst, addToGraph(GarbageValue), valueDst, addToGraph(GarbageValue));
+            case GlobalVar:
+            case GlobalVarWithVarInjectionChecks: {
+                if (watchpoints) {
+                    SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
+                    ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
+                }
+                Node* valueNode = get(VirtualRegister(value));
+                addToGraph(PutGlobalVar, OpInfo(operand), weakJSConstant(globalObject), valueNode);
+                if (watchpoints && watchpoints->state() != IsInvalidated) {
+                    // Must happen after the store. See comment for GetGlobalVar.
+                    addToGraph(NotifyWrite, OpInfo(watchpoints));
+                }
+                // Keep scope alive until after put.
+                addToGraph(Phantom, get(VirtualRegister(scope)));
+                break;
             }
+            case LocalClosureVar:
+            case ClosureVar:
+            case ClosureVarWithVarInjectionChecks: {
+                Node* scopeNode = get(VirtualRegister(scope));
+                Node* valueNode = get(VirtualRegister(value));
 
-            NEXT_OPCODE(op_resolve_with_this);
+                addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);
+
+                if (watchpoints && watchpoints->state() != IsInvalidated) {
+                    // Must happen after the store. See comment for GetGlobalVar.
+                    addToGraph(NotifyWrite, OpInfo(watchpoints));
+                }
+                break;
+            }
+            case Dynamic:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+            NEXT_OPCODE(op_put_to_scope);
         }
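
The GlobalProperty path of op_put_to_scope above only takes the inlined fast path when profiling reports exactly one replace-style variant against a single structure; anything else falls back to a generic PutById. A small stand-in sketch of just that decision:

    #include <vector>

    struct PutVariant {
        enum Kind { Replace, Transition, Setter };
        Kind kind;
        unsigned structureCount;
    };

    struct PutStatus {
        std::vector<PutVariant> variants;
    };

    // Mirrors the guard above: one variant, a plain replace, exactly one structure.
    bool canUseDirectReplace(const PutStatus& status)
    {
        return status.variants.size() == 1
            && status.variants[0].kind == PutVariant::Replace
            && status.variants[0].structureCount == 1;
    }
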
+
         case op_loop_hint: {
             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
             // OSR can only happen at basic block boundaries. Assert that these two statements
@@ -3301,93 +3719,192 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             if (!m_inlineStackTop->m_caller)
                 m_currentBlock->isOSRTarget = true;
 
-            if (m_vm->watchdog.isEnabled())
+            addToGraph(LoopHint);
+            
+            if (m_vm->watchdog && m_vm->watchdog->isEnabled())
                 addToGraph(CheckWatchdogTimer);
-            else {
-                // Emit a phantom node to ensure that there is a placeholder
-                // node for this bytecode op.
-                addToGraph(Phantom);
-            }
             
             NEXT_OPCODE(op_loop_hint);
         }
             
-        case op_init_lazy_reg: {
-            set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
-            NEXT_OPCODE(op_init_lazy_reg);
+        case op_create_lexical_environment: {
+            FrozenValue* symbolTable = m_graph.freezeStrong(m_graph.symbolTableFor(currentNodeOrigin().semantic));
+            Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), get(VirtualRegister(currentInstruction[2].u.operand)));
+            set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment);
+            set(VirtualRegister(currentInstruction[2].u.operand), lexicalEnvironment);
+            NEXT_OPCODE(op_create_lexical_environment);
         }
             
-        case op_create_activation: {
-            set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
-            NEXT_OPCODE(op_create_activation);
+        case op_get_scope: {
+            // Help the later stages a bit by doing some small constant folding here. Note that this
+            // only helps for the first basic block. It's extremely important not to constant fold
+            // loads from the scope register later, as that would prevent the DFG from tracking the
+            // bytecode-level liveness of the scope register.
+            Node* callee = get(VirtualRegister(JSStack::Callee));
+            Node* result;
+            if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>())
+                result = weakJSConstant(function->scope());
+            else
+                result = addToGraph(GetScope, callee);
+            set(VirtualRegister(currentInstruction[1].u.operand), result);
+            NEXT_OPCODE(op_get_scope);
         }
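
op_get_scope above is folded to a constant only when the callee itself is a compile-time constant function, and deliberately only in that narrow case, as the comment explains. A stand-in sketch of the decision (Function, Node, and Scope are simplified, not JSC's types):

    struct Scope { };
    struct Function { Scope* scopeChain; };

    struct Node {
        const Function* constantFunction; // non-null only when the callee is a known constant
    };

    // Returns the folded scope, or null when the parser must emit a GetScope node instead.
    const Scope* tryFoldScope(const Node& callee)
    {
        if (callee.constantFunction)
            return callee.constantFunction->scopeChain;
        return nullptr;
    }
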
             
-        case op_create_arguments: {
-            m_graph.m_hasArguments = true;
-            Node* createArguments = addToGraph(CreateArguments, get(currentInstruction[1].u.operand));
-            set(currentInstruction[1].u.operand, createArguments);
-            set(unmodifiedArgumentsRegister(currentInstruction[1].u.operand), createArguments);
-            NEXT_OPCODE(op_create_arguments);
+        case op_create_direct_arguments: {
+            noticeArgumentsUse();
+            Node* createArguments = addToGraph(CreateDirectArguments);
+            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+            NEXT_OPCODE(op_create_direct_arguments);
         }
             
-        case op_tear_off_activation: {
-            addToGraph(TearOffActivation, get(currentInstruction[1].u.operand));
-            NEXT_OPCODE(op_tear_off_activation);
+        case op_create_scoped_arguments: {
+            noticeArgumentsUse();
+            Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand)));
+            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+            NEXT_OPCODE(op_create_scoped_arguments);
         }
 
-        case op_tear_off_arguments: {
-            m_graph.m_hasArguments = true;
-            addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand)), get(currentInstruction[2].u.operand));
-            NEXT_OPCODE(op_tear_off_arguments);
+        case op_create_out_of_band_arguments: {
+            noticeArgumentsUse();
+            Node* createArguments = addToGraph(CreateClonedArguments);
+            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+            NEXT_OPCODE(op_create_out_of_band_arguments);
         }
             
-        case op_get_arguments_length: {
-            m_graph.m_hasArguments = true;
-            set(currentInstruction[1].u.operand, addToGraph(GetMyArgumentsLengthSafe));
-            NEXT_OPCODE(op_get_arguments_length);
+        case op_get_from_arguments: {
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                addToGraph(
+                    GetFromArguments,
+                    OpInfo(currentInstruction[3].u.operand),
+                    OpInfo(getPrediction()),
+                    get(VirtualRegister(currentInstruction[2].u.operand))));
+            NEXT_OPCODE(op_get_from_arguments);
         }
             
-        case op_get_argument_by_val: {
-            m_graph.m_hasArguments = true;
-            set(currentInstruction[1].u.operand,
-                addToGraph(
-                    GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
-                    get(currentInstruction[3].u.operand)));
-            NEXT_OPCODE(op_get_argument_by_val);
+        case op_put_to_arguments: {
+            addToGraph(
+                PutToArguments,
+                OpInfo(currentInstruction[2].u.operand),
+                get(VirtualRegister(currentInstruction[1].u.operand)),
+                get(VirtualRegister(currentInstruction[3].u.operand)));
+            NEXT_OPCODE(op_put_to_arguments);
         }
             
         case op_new_func: {
-            if (!currentInstruction[3].u.operand) {
-                set(currentInstruction[1].u.operand,
-                    addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
-            } else {
-                set(currentInstruction[1].u.operand,
-                    addToGraph(
-                        NewFunction,
-                        OpInfo(currentInstruction[2].u.operand),
-                        get(currentInstruction[1].u.operand)));
-            }
+            FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand);
+            FrozenValue* frozen = m_graph.freezeStrong(decl);
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                addToGraph(NewFunction, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
             NEXT_OPCODE(op_new_func);
         }
-            
+
         case op_new_func_exp: {
-            set(currentInstruction[1].u.operand,
-                addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
+            FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand);
+            FrozenValue* frozen = m_graph.freezeStrong(expr);
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                addToGraph(NewFunction, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
             NEXT_OPCODE(op_new_func_exp);
         }
 
         case op_typeof: {
-            set(currentInstruction[1].u.operand,
-                addToGraph(TypeOf, get(currentInstruction[2].u.operand)));
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
             NEXT_OPCODE(op_typeof);
         }
 
         case op_to_number: {
-            set(currentInstruction[1].u.operand,
-                addToGraph(Identity, Edge(get(currentInstruction[2].u.operand), NumberUse)));
+            Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
+            addToGraph(Phantom, Edge(node, NumberUse));
+            set(VirtualRegister(currentInstruction[1].u.operand), node);
             NEXT_OPCODE(op_to_number);
         }
 
+        case op_to_string: {
+            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value));
+            NEXT_OPCODE(op_to_string);
+        }
+
+        case op_in: {
+            set(VirtualRegister(currentInstruction[1].u.operand),
+                addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
+            NEXT_OPCODE(op_in);
+        }
+
+        case op_get_enumerable_length: {
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength, 
+                get(VirtualRegister(currentInstruction[2].u.operand))));
+            NEXT_OPCODE(op_get_enumerable_length);
+        }
+
+        case op_has_generic_property: {
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty, 
+                get(VirtualRegister(currentInstruction[2].u.operand)),
+                get(VirtualRegister(currentInstruction[3].u.operand))));
+            NEXT_OPCODE(op_has_generic_property);
+        }
+
+        case op_has_structure_property: {
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty, 
+                get(VirtualRegister(currentInstruction[2].u.operand)),
+                get(VirtualRegister(currentInstruction[3].u.operand)),
+                get(VirtualRegister(currentInstruction[4].u.operand))));
+            NEXT_OPCODE(op_has_structure_property);
+        }
+
+        case op_has_indexed_property: {
+            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+            ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
+            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
+            Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), base, property);
+            set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
+            NEXT_OPCODE(op_has_indexed_property);
+        }
+
+        case op_get_direct_pname: {
+            SpeculatedType prediction = getPredictionWithoutOSRExit();
+            
+            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
+            Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
+            Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));
+
+            addVarArgChild(base);
+            addVarArgChild(property);
+            addVarArgChild(index);
+            addVarArgChild(enumerator);
+            set(VirtualRegister(currentInstruction[1].u.operand), 
+                addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
+
+            NEXT_OPCODE(op_get_direct_pname);
+        }
+
+        case op_get_property_enumerator: {
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator, 
+                get(VirtualRegister(currentInstruction[2].u.operand))));
+            NEXT_OPCODE(op_get_property_enumerator);
+        }
+
+        case op_enumerator_structure_pname: {
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname,
+                get(VirtualRegister(currentInstruction[2].u.operand)),
+                get(VirtualRegister(currentInstruction[3].u.operand))));
+            NEXT_OPCODE(op_enumerator_structure_pname);
+        }
+
+        case op_enumerator_generic_pname: {
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname,
+                get(VirtualRegister(currentInstruction[2].u.operand)),
+                get(VirtualRegister(currentInstruction[3].u.operand))));
+            NEXT_OPCODE(op_enumerator_generic_pname);
+        }
+            
+        case op_to_index_string: {
+            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString, 
+                get(VirtualRegister(currentInstruction[2].u.operand))));
+            NEXT_OPCODE(op_to_index_string);
+        }
+
         default:
             // Parse failed! This should not happen because the capabilities checker
             // should have caught it.
@@ -3397,46 +3914,51 @@ bool ByteCodeParser::parseBlock(unsigned limit)
     }
 }
 
-void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
+void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
 {
     ASSERT(!block->isLinked);
     ASSERT(!block->isEmpty());
-    Node* node = block->last();
+    Node* node = block->terminal();
     ASSERT(node->isTerminal());
     
     switch (node->op()) {
     case Jump:
-        node->setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLogF("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node->takenBlockIndex()].get(), node->takenBlockIndex());
-#endif
+        node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
         break;
         
-    case Branch:
-        node->setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
-        node->setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->notTakenBytecodeOffsetDuringParsing()));
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLogF("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node->takenBlockIndex()].get(), node->takenBlockIndex(), m_graph.m_blocks[node->notTakenBlockIndex()].get(), node->notTakenBlockIndex());
-#endif
+    case Branch: {
+        BranchData* data = node->branchData();
+        data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
+        data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
+        break;
+    }
+        
+    case Switch: {
+        SwitchData* data = node->switchData();
+        for (unsigned i = node->switchData()->cases.size(); i--;)
+            data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
+        data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
         break;
+    }
         
     default:
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLogF("Marking basic block %p as linked.\n", block);
-#endif
         break;
     }
     
-#if !ASSERT_DISABLED
-    block->isLinked = true;
-#endif
+    if (verbose)
+        dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n");
+    block->didLink();
 }
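
linkBlock() above patches every terminal's targets by looking blocks up from their starting bytecode index. A minimal sketch of that lookup over a target list kept sorted by bytecodeBegin (Block is a stand-in; the real parser asserts rather than returning null on a miss):

    #include <algorithm>
    #include <vector>

    struct Block {
        unsigned bytecodeBegin;
    };

    // 'possibleTargets' is sorted by starting bytecode index, and every branch
    // target is expected to begin a block.
    Block* blockForBytecodeOffset(std::vector<Block*>& possibleTargets, unsigned offset)
    {
        auto it = std::lower_bound(
            possibleTargets.begin(), possibleTargets.end(), offset,
            [](Block* block, unsigned value) { return block->bytecodeBegin < value; });
        if (it == possibleTargets.end() || (*it)->bytecodeBegin != offset)
            return nullptr;
        return *it;
    }
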
 
-void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
+void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
 {
     for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
+        if (verbose)
+            dataLog("Attempting to link ", RawPointer(unlinkedBlocks[i].m_block), "\n");
         if (unlinkedBlocks[i].m_needsNormalLinking) {
-            linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
+            if (verbose)
+                dataLog("    Does need normal linking.\n");
+            linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
             unlinkedBlocks[i].m_needsNormalLinking = false;
         }
     }
@@ -3449,13 +3971,6 @@ void ByteCodeParser::buildOperandMapsIfNecessary()
     
     for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
         m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
-    for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
-        JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
-        if (!value)
-            m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
-        else
-            m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
-    }
     
     m_haveBuiltOperandMaps = true;
 }
@@ -3464,23 +3979,35 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
     ByteCodeParser* byteCodeParser,
     CodeBlock* codeBlock,
     CodeBlock* profiledBlock,
-    BlockIndex callsiteBlockHead,
+    BasicBlock* callsiteBlockHead,
     JSFunction* callee, // Null if this is a closure call.
     VirtualRegister returnValueVR,
     VirtualRegister inlineCallFrameStart,
     int argumentCountIncludingThis,
-    CodeSpecializationKind kind)
+    InlineCallFrame::Kind kind)
     : m_byteCodeParser(byteCodeParser)
     , m_codeBlock(codeBlock)
     , m_profiledBlock(profiledBlock)
-    , m_exitProfile(profiledBlock->exitProfile())
     , m_callsiteBlockHead(callsiteBlockHead)
     , m_returnValue(returnValueVR)
-    , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
     , m_didReturn(false)
     , m_didEarlyReturn(false)
     , m_caller(byteCodeParser->m_inlineStackTop)
 {
+    {
+        ConcurrentJITLocker locker(m_profiledBlock->m_lock);
+        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
+        m_exitProfile.initialize(locker, profiledBlock->exitProfile());
+        
+        // We do this while holding the lock because StructureStubInfos may still be getting
+        // added to operations; in particular, the profiled block could be in the middle of
+        // LLInt->JIT tier-up, in which case the infos are being added right now.
+        if (m_profiledBlock->hasBaselineJITProfiling()) {
+            m_profiledBlock->getStubInfoMap(locker, m_stubInfos);
+            m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos);
+        }
+    }
+    
     m_argumentPositions.resize(argumentCountIncludingThis);
     for (int i = 0; i < argumentCountIncludingThis; ++i) {
         byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
@@ -3488,85 +4015,44 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
         m_argumentPositions[i] = argumentPosition;
     }
     
-    // Track the code-block-global exit sites.
-    if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
-        byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
-            codeBlock->ownerExecutable());
-    }
-        
     if (m_caller) {
         // Inline case.
         ASSERT(codeBlock != byteCodeParser->m_codeBlock);
-        ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
-        ASSERT(callsiteBlockHead != NoBlock);
-        
-        InlineCallFrame inlineCallFrame;
-        inlineCallFrame.executable.set(*byteCodeParser->m_vm, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
-        inlineCallFrame.stackOffset = inlineCallFrameStart + JSStack::CallFrameHeaderSize;
-        if (callee)
-            inlineCallFrame.callee.set(*byteCodeParser->m_vm, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
-        inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
-        inlineCallFrame.arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
-        inlineCallFrame.isCall = isCall(kind);
-        
-        if (inlineCallFrame.caller.inlineCallFrame)
-            inlineCallFrame.capturedVars = inlineCallFrame.caller.inlineCallFrame->capturedVars;
-        else {
-            for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) {
-                if (byteCodeParser->m_codeBlock->isCaptured(i))
-                    inlineCallFrame.capturedVars.set(i);
-            }
-        }
-
-        for (int i = argumentCountIncludingThis; i--;) {
-            if (codeBlock->isCaptured(argumentToOperand(i)))
-                inlineCallFrame.capturedVars.set(argumentToOperand(i) + inlineCallFrame.stackOffset);
-        }
-        for (size_t i = codeBlock->m_numVars; i--;) {
-            if (codeBlock->isCaptured(i))
-                inlineCallFrame.capturedVars.set(i + inlineCallFrame.stackOffset);
-        }
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLogF("Current captured variables: ");
-        inlineCallFrame.capturedVars.dump(WTF::dataFile());
-        dataLogF("\n");
-#endif
+        ASSERT(inlineCallFrameStart.isValid());
+        ASSERT(callsiteBlockHead);
         
-        byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
-        m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
+        m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
+        byteCodeParser->m_graph.freeze(codeBlock->ownerExecutable());
+        initializeLazyWriteBarrierForInlineCallFrameExecutable(
+            byteCodeParser->m_graph.m_plan.writeBarriers,
+            m_inlineCallFrame->executable,
+            byteCodeParser->m_codeBlock,
+            m_inlineCallFrame,
+            byteCodeParser->m_codeBlock->ownerExecutable(),
+            codeBlock->ownerExecutable());
+        m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize);
+        if (callee) {
+            m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
+            m_inlineCallFrame->isClosureCall = false;
+        } else
+            m_inlineCallFrame->isClosureCall = true;
+        m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin();
+        m_inlineCallFrame->arguments.resizeToFit(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
+        m_inlineCallFrame->kind = kind;
         
         byteCodeParser->buildOperandMapsIfNecessary();
         
         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
-        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
         m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
+        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
 
         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
-            StringImpl* rep = codeBlock->identifier(i).impl();
-            IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
+            UniquedStringImpl* rep = codeBlock->identifier(i).impl();
+            BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers());
             if (result.isNewEntry)
-                byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_vm, rep));
+                byteCodeParser->m_graph.identifiers().addLazily(rep);
             m_identifierRemap[i] = result.iterator->value;
         }
-        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
-            JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
-            if (!value) {
-                if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
-                    byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
-                    byteCodeParser->m_codeBlock->addConstant(JSValue());
-                    byteCodeParser->m_constants.append(ConstantRecord());
-                }
-                m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
-                continue;
-            }
-            JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
-            if (result.isNewEntry) {
-                byteCodeParser->m_codeBlock->addConstant(value);
-                byteCodeParser->m_constants.append(ConstantRecord());
-            }
-            m_constantRemap[i] = result.iterator->value;
-        }
         for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
             // If we inline the same code block multiple times, we don't want to needlessly
             // duplicate its constant buffers.
@@ -3581,49 +4067,57 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
             m_constantBufferRemap[i] = newIndex;
             byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
         }
+        for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
+            m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
+            byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i);
+        }
         m_callsiteBlockHeadNeedsLinking = true;
     } else {
         // Machine code block case.
         ASSERT(codeBlock == byteCodeParser->m_codeBlock);
         ASSERT(!callee);
-        ASSERT(returnValueVR == InvalidVirtualRegister);
-        ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
-        ASSERT(callsiteBlockHead == NoBlock);
+        ASSERT(!returnValueVR.isValid());
+        ASSERT(!inlineCallFrameStart.isValid());
+        ASSERT(!callsiteBlockHead);
 
         m_inlineCallFrame = 0;
 
         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
-        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
         m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
+        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
             m_identifierRemap[i] = i;
-        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
-            m_constantRemap[i] = i + FirstConstantRegisterIndex;
         for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
             m_constantBufferRemap[i] = i;
+        for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
+            m_switchRemap[i] = i;
         m_callsiteBlockHeadNeedsLinking = false;
     }
     
-    for (size_t i = 0; i < m_constantRemap.size(); ++i)
-        ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
-    
     byteCodeParser->m_inlineStackTop = this;
 }
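
The identifier, constant-buffer, and switch-table remapping above all follow the same pattern: deduplicate the inlinee's entries into the root code block's tables and record, for each old index, the index it now maps to. A minimal, self-contained sketch of that pattern (generic std:: types and invented names, not the JSC structures used in the constructor above):

    #include <string>
    #include <unordered_map>
    #include <vector>

    // Illustrative only: fold an inlinee's string table into a shared table,
    // recording oldIndex -> newIndex so the inlinee's operands can be rewritten.
    std::vector<size_t> buildRemap(const std::vector<std::string>& inlineeTable,
                                   std::vector<std::string>& sharedTable,
                                   std::unordered_map<std::string, size_t>& sharedIndex)
    {
        std::vector<size_t> remap(inlineeTable.size());
        for (size_t i = 0; i < inlineeTable.size(); ++i) {
            auto result = sharedIndex.emplace(inlineeTable[i], sharedTable.size());
            if (result.second)               // first time this entry is seen
                sharedTable.push_back(inlineeTable[i]);
            remap[i] = result.first->second; // otherwise reuse the existing slot
        }
        return remap;
    }

In the machine-code-block branch the remap is simply the identity, which matches the loops that assign m_identifierRemap[i] = i above.
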
 
 void ByteCodeParser::parseCodeBlock()
 {
+    clearCaches();
+    
     CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
     
-    if (m_graph.m_compilation) {
-        m_graph.m_compilation->addProfiledBytecodes(
+    if (m_graph.compilation()) {
+        m_graph.compilation()->addProfiledBytecodes(
             *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
     }
     
-    bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
-#if DFG_ENABLE(DEBUG_VERBOSE)
-    shouldDumpBytecode |= true;
-#endif
-    if (shouldDumpBytecode) {
+    if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
+        Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump();
+        if (inlineCallFrame()) {
+            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->caller);
+            deferredSourceDump.append(dump);
+        } else
+            deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
+    }
+
+    if (Options::dumpBytecodeAtDFGTime()) {
         dataLog("Parsing ", *codeBlock);
         if (inlineCallFrame()) {
             dataLog(
@@ -3631,9 +4125,7 @@ void ByteCodeParser::parseCodeBlock()
                 " ", inlineCallFrame()->caller);
         }
         dataLog(
-            ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0,
-            ", needsFullScopeChain = ", codeBlock->needsFullScopeChain(),
-            ", needsActivation = ", codeBlock->ownerExecutable()->needsActivation(),
+            ": needsActivation = ", codeBlock->needsActivation(),
             ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
         codeBlock->baselineVersion()->dumpBytecode();
     }
@@ -3651,40 +4143,28 @@ void ByteCodeParser::parseCodeBlock()
     for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
         // The maximum bytecode offset to go into the current basic block is either the next jump target or the end of the instructions.
         unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
-#if DFG_ENABLE(DEBUG_VERBOSE)
-        dataLog(
-            "Parsing bytecode with limit ", pointerDump(inlineCallFrame()),
-            " bc#", limit, " at inline depth ",
-            CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()), ".\n");
-#endif
         ASSERT(m_currentIndex < limit);
 
         // Loop until we reach the current limit (i.e. next jump target).
         do {
             if (!m_currentBlock) {
                 // Check if we can use the last block.
-                if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->isEmpty()) {
+                if (m_graph.numBlocks() && m_graph.lastBlock()->isEmpty()) {
                     // This must be a block belonging to us.
-                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
+                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
                     // Either the block is linkable or it isn't. If it's linkable then it's the last
                     // block in the blockLinkingTargets list. If it's not then the last block will
                    // have a lower bytecode index than the one we're about to give to this block.
-                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
+                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin != m_currentIndex) {
                         // Make the block linkable.
-                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
-                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
+                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < m_currentIndex);
+                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.lastBlock());
                     }
                     // Change its bytecode begin and continue.
-                    m_currentBlock = m_graph.m_blocks.last().get();
-#if DFG_ENABLE(DEBUG_VERBOSE)
-                    dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
-#endif
+                    m_currentBlock = m_graph.lastBlock();
                     m_currentBlock->bytecodeBegin = m_currentIndex;
                 } else {
-                    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
-#if DFG_ENABLE(DEBUG_VERBOSE)
-                    dataLogF("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
-#endif
+                    RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, PNaN));
                     m_currentBlock = block.get();
                     // This assertion checks two things:
                     // 1) If the bytecodeBegin is greater than currentIndex, then something has gone
@@ -3692,13 +4172,18 @@ void ByteCodeParser::parseCodeBlock()
                     // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
                     //    a peephole coalescing of this block in the if statement above. So, we're
                     //    generating suboptimal code and leaving more work for the CFG simplifier.
-                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
-                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
-                    m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
+                    if (!m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
+                        unsigned lastBegin =
+                            m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin;
+                        ASSERT_UNUSED(
+                            lastBegin, lastBegin == UINT_MAX || lastBegin < m_currentIndex);
+                    }
+                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
+                    m_inlineStackTop->m_blockLinkingTargets.append(block.get());
                     // The first block is definitely an OSR target.
-                    if (!m_graph.m_blocks.size())
+                    if (!m_graph.numBlocks())
                         block->isOSRTarget = true;
-                    m_graph.m_blocks.append(block.release());
+                    m_graph.appendBlock(block);
                     prepareToParseBlock();
                 }
             }
@@ -3713,10 +4198,13 @@ void ByteCodeParser::parseCodeBlock()
             // are at the end of an inline function, or we realized that we
             // should stop parsing because there was a return in the first
             // basic block.
-            ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);
+            ASSERT(m_currentBlock->isEmpty() || m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);
 
-            if (!shouldContinueParsing)
+            if (!shouldContinueParsing) {
+                if (Options::verboseDFGByteCodeParsing())
+                    dataLog("Done parsing ", *codeBlock, "\n");
                 return;
+            }
             
             m_currentBlock = 0;
         } while (m_currentIndex < limit);
@@ -3724,6 +4212,9 @@ void ByteCodeParser::parseCodeBlock()
 
     // Should have reached the end of the instructions.
     ASSERT(m_currentIndex == codeBlock->instructions().size());
+    
+    if (Options::verboseDFGByteCodeParsing())
+        dataLog("Done parsing ", *codeBlock, " (fell off end)\n");
 }
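
The inner loop above either re-purposes the graph's last block when it is still empty (retargeting its bytecodeBegin, the peephole case) or appends a fresh block for the current bytecode index. A rough stand-alone sketch of that decision, using invented Block/blockForIndex names rather than the real Graph and BasicBlock API:

    #include <memory>
    #include <vector>

    struct Block {
        unsigned bytecodeBegin = 0;
        bool isEmpty = true;
    };

    // Illustrative only: reuse a trailing empty block if there is one, otherwise
    // allocate a new block starting at the current bytecode index.
    Block* blockForIndex(std::vector<std::unique_ptr<Block>>& blocks, unsigned currentIndex)
    {
        if (!blocks.empty() && blocks.back()->isEmpty) {
            blocks.back()->bytecodeBegin = currentIndex; // retarget the still-empty block
            return blocks.back().get();
        }
        auto block = std::make_unique<Block>();
        block->bytecodeBegin = currentIndex;
        blocks.push_back(std::move(block));
        return blocks.back().get();
    }

The real parser additionally records each new block as an unlinked block and a linking target, as the appended lines above show.
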
 
 bool ByteCodeParser::parse()
@@ -3731,58 +4222,48 @@ bool ByteCodeParser::parse()
     // Set during construction.
     ASSERT(!m_currentIndex);
     
-#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
-    // We should be pretending that the code has an activation.
-    ASSERT(m_graph.needsActivation());
-#endif
+    if (Options::verboseDFGByteCodeParsing())
+        dataLog("Parsing ", *m_codeBlock, "\n");
+    
+    m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock.get();
+    if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock
+        && Options::enablePolyvariantDevirtualization()) {
+        if (Options::enablePolyvariantCallInlining())
+            CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap);
+        if (Options::enablePolyvariantByIdInlining())
+            m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos);
+    }
     
     InlineStackEntry inlineStackEntry(
-        this, m_codeBlock, m_profiledBlock, NoBlock, 0, InvalidVirtualRegister, InvalidVirtualRegister,
-        m_codeBlock->numParameters(), CodeForCall);
+        this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(),
+        m_codeBlock->numParameters(), InlineCallFrame::Call);
     
     parseCodeBlock();
 
     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
     m_graph.determineReachability();
+    m_graph.killUnreachableBlocks();
     
-    ASSERT(m_preservedVars.size());
-    size_t numberOfLocals = 0;
-    for (size_t i = m_preservedVars.size(); i--;) {
-        if (m_preservedVars.quickGet(i)) {
-            numberOfLocals = i + 1;
-            break;
-        }
-    }
-    
-    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
-        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
-        ASSERT(block);
-        if (!block->isReachable) {
-            m_graph.m_blocks[blockIndex].clear();
+    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+        BasicBlock* block = m_graph.block(blockIndex);
+        if (!block)
             continue;
-        }
-        
-        block->variablesAtHead.ensureLocals(numberOfLocals);
-        block->variablesAtTail.ensureLocals(numberOfLocals);
+        ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
+        ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
+        ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
+        ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
     }
     
-    m_graph.m_preservedVars = m_preservedVars;
     m_graph.m_localVars = m_numLocals;
     m_graph.m_parameterSlots = m_parameterSlots;
 
     return true;
 }
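
determineReachability() and killUnreachableBlocks() are outside this diff; the sketch below shows the usual shape of such a pass (mark everything reachable from the entry block, then drop the rest), as an assumption about the behaviour rather than the actual Graph implementation. The nulled-out slots line up with the loop above that skips null blocks.

    #include <memory>
    #include <vector>

    struct Node {
        std::vector<size_t> successors; // indices of successor blocks
        bool reachable = false;
    };

    // Illustrative only: mark everything reachable from block 0, then null out
    // the rest, leaving holes that later passes must be prepared to skip.
    void pruneUnreachable(std::vector<std::unique_ptr<Node>>& blocks)
    {
        if (blocks.empty())
            return;
        std::vector<size_t> worklist = { 0 };
        blocks[0]->reachable = true;
        while (!worklist.empty()) {
            size_t index = worklist.back();
            worklist.pop_back();
            for (size_t successor : blocks[index]->successors) {
                if (!blocks[successor]->reachable) {
                    blocks[successor]->reachable = true;
                    worklist.push_back(successor);
                }
            }
        }
        for (auto& block : blocks) {
            if (!block->reachable)
                block.reset(); // leave a null slot rather than renumbering blocks
        }
    }
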
 
-bool parse(ExecState*, Graph& graph)
+bool parse(Graph& graph)
 {
     SamplingRegion samplingRegion("DFG Parsing");
-#if DFG_DEBUG_LOCAL_DISBALE
-    UNUSED_PARAM(exec);
-    UNUSED_PARAM(graph);
-    return false;
-#else
     return ByteCodeParser(graph).parse();
-#endif
 }
 
 } } // namespace JSC::DFG