diff --git a/bytecompiler/BytecodeGenerator.cpp b/bytecompiler/BytecodeGenerator.cpp
index b0a0877c15ba83b47b4dff470e37e487c99f3d1d..734546ab98e187e0d0d8be1322763456599c9886 100644
--- a/bytecompiler/BytecodeGenerator.cpp
+++ b/bytecompiler/BytecodeGenerator.cpp
@@ -1,6 +1,7 @@
 /*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) 2012 Igalia, S.L.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -11,7 +12,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
 #include "config.h"
 #include "BytecodeGenerator.h"
 
-#include "BatchedTransitionOptimizer.h"
-#include "PrototypeFunction.h"
-#include "JSFunction.h"
 #include "Interpreter.h"
-#include "UString.h"
+#include "JSActivation.h"
+#include "JSFunction.h"
+#include "JSNameScope.h"
+#include "LowLevelInterpreter.h"
+#include "JSCInlines.h"
+#include "Options.h"
+#include "StackAlignment.h"
+#include "StrongInlines.h"
+#include "UnlinkedCodeBlock.h"
+#include "UnlinkedInstructionStream.h"
+#include <wtf/StdLibExtras.h>
+#include <wtf/text/WTFString.h>
 
 using namespace std;
 
 namespace JSC {
 
-/*
-    The layout of a register frame looks like this:
-
-    For
-
-    function f(x, y) {
-        var v1;
-        function g() { }
-        var v2;
-        return (x) * (y);
-    }
-
-    assuming (x) and (y) generated temporaries t1 and t2, you would have
-
-    ------------------------------------
-    |  x |  y |  g | v2 | v1 | t1 | t2 | <-- value held
-    ------------------------------------
-    | -5 | -4 | -3 | -2 | -1 | +0 | +1 | <-- register index
-    ------------------------------------
-    | params->|<-locals      | temps->
-
-    Because temporary registers are allocated in a stack-like fashion, we
-    can reclaim them with a simple popping algorithm. The same goes for labels.
-    (We never reclaim parameter or local registers, because parameters and
-    locals are DontDelete.)
-
-    The register layout before a function call looks like this:
-
-    For
-
-    function f(x, y)
-    {
-    }
-
-    f(1);
-
-    >                        <------------------------------
-    <                        >  reserved: call frame  |  1 | <-- value held
-    >         >snip<         <------------------------------
-    <                        > +0 | +1 | +2 | +3 | +4 | +5 | <-- register index
-    >                        <------------------------------
-    | params->|<-locals      | temps->
-
-    The call instruction fills in the "call frame" registers. It also pads
-    missing arguments at the end of the call:
-
-    >                        <-----------------------------------
-    <                        >  reserved: call frame  |  1 |  ? | <-- value held ("?" stands for "undefined")
-    >         >snip<         <-----------------------------------
-    <                        > +0 | +1 | +2 | +3 | +4 | +5 | +6 | <-- register index
-    >                        <-----------------------------------
-    | params->|<-locals      | temps->
-
-    After filling in missing arguments, the call instruction sets up the new
-    stack frame to overlap the end of the old stack frame:
-
-                             |---------------------------------->                        <
-                             |  reserved: call frame  |  1 |  ? <                        > <-- value held ("?" stands for "undefined")
-                             |---------------------------------->         >snip<         <
-                             | -7 | -6 | -5 | -4 | -3 | -2 | -1 <                        > <-- register index
-                             |---------------------------------->                        <
-                             |                        | params->|<-locals       | temps->
-
-    That way, arguments are "copied" into the callee's stack frame for free.
-
-    If the caller supplies too many arguments, this trick doesn't work. The
-    extra arguments protrude into space reserved for locals and temporaries.
-    In that case, the call instruction makes a real copy of the call frame header,
-    along with just the arguments expected by the callee, leaving the original
-    call frame header and arguments behind. (The call instruction can't just discard
-    extra arguments, because the "arguments" object may access them later.)
-    This copying strategy ensures that all named values will be at the indices
-    expected by the callee.
-*/
-
-#ifndef NDEBUG
-static bool s_dumpsGeneratedCode = false;
-#endif
-
-void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode)
+void Label::setLocation(unsigned location)
 {
-#ifndef NDEBUG
-    s_dumpsGeneratedCode = dumpsGeneratedCode;
-#else
-    UNUSED_PARAM(dumpsGeneratedCode);
-#endif
-}
-
-bool BytecodeGenerator::dumpsGeneratedCode()
-{
-#ifndef NDEBUG
-    return s_dumpsGeneratedCode;
-#else
-    return false;
-#endif
+    m_location = location;
+    
+    unsigned size = m_unresolvedJumps.size();
+    for (unsigned i = 0; i < size; ++i)
+        m_generator->m_instructions[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first;
 }
 
-void BytecodeGenerator::generate()
+ParserError BytecodeGenerator::generate()
 {
-    m_codeBlock->setThisRegister(m_thisRegister.index());
+    SamplingRegion samplingRegion("Bytecode Generation");
+    
+    m_codeBlock->setThisRegister(m_thisRegister.virtualRegister());
+    for (size_t i = 0; i < m_deconstructedParameters.size(); i++) {
+        auto& entry = m_deconstructedParameters[i];
+        entry.second->bindValue(*this, entry.first.get());
+    }
 
     m_scopeNode->emitBytecode(*this);
 
-#ifndef NDEBUG
-    m_codeBlock->setInstructionCount(m_codeBlock->instructions().size());
+    m_staticPropertyAnalyzer.kill();
 
-    if (s_dumpsGeneratedCode)
-        m_codeBlock->dump(m_scopeChain->globalObject()->globalExec());
-#endif
-
-    if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode)
-        symbolTable().clear();
+    for (unsigned i = 0; i < m_tryRanges.size(); ++i) {
+        TryRange& range = m_tryRanges[i];
+        int start = range.start->bind();
+        int end = range.end->bind();
         
-    m_codeBlock->setIsNumericCompareFunction(instructions() == m_globalData->numericCompareFunction(m_scopeChain->globalObject()->globalExec()));
-
-#if !ENABLE(OPCODE_SAMPLING)
-    if (!m_regeneratingForExceptionInfo && (m_codeType == FunctionCode || m_codeType == EvalCode))
-        m_codeBlock->clearExceptionInfo();
-#endif
+        // This will happen for empty try blocks and for some cases of finally blocks:
+        //
+        // try {
+        //    try {
+        //    } finally {
+        //        return 42;
+        //        // *HERE*
+        //    }
+        // } finally {
+        //    print("things");
+        // }
+        //
+        // The return will pop scopes to execute the outer finally block. But this includes
+        // popping the try context for the inner try. The try context is live in the fall-through
+        // part of the finally block not because we will emit a handler that overlaps the finally,
+        // but because we haven't yet had a chance to plant the catch target. Then when we finish
+        // emitting code for the outer finally block, we repush the try context, this time with a
+        // new start index. But that means that the start index for the try range corresponding
+        // to the inner-finally-following-the-return (marked as "*HERE*" above) will be greater
+        // than the end index of the try block. This is harmless since end < start handlers will
+        // never get matched in our logic, but we do the runtime a favor and choose to not emit
+        // such handlers at all.
+        if (end <= start)
+            continue;
+        
+        ASSERT(range.tryData->targetScopeDepth != UINT_MAX);
+        UnlinkedHandlerInfo info = {
+            static_cast<uint32_t>(start), static_cast<uint32_t>(end),
+            static_cast<uint32_t>(range.tryData->target->bind()),
+            range.tryData->targetScopeDepth
+        };
+        m_codeBlock->addExceptionHandler(info);
+    }
+    
+    m_codeBlock->setInstructions(std::make_unique<UnlinkedInstructionStream>(m_instructions));
 
     m_codeBlock->shrinkToFit();
-}
-
-bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
-{
-    int index = m_calleeRegisters.size();
-    SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
-    pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
 
-    if (!result.second) {
-        r0 = &registerFor(result.first->second.getIndex());
-        return false;
-    }
+    if (m_codeBlock->symbolTable())
+        m_codeBlock->setSymbolTable(m_codeBlock->symbolTable()->cloneCapturedNames(*m_codeBlock->vm()));
 
-    ++m_codeBlock->m_numVars;
-    r0 = newRegister();
-    return true;
+    if (m_expressionTooDeep)
+        return ParserError(ParserError::OutOfMemory);
+    return ParserError(ParserError::ErrorNone);
 }
 
-bool BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
+bool BytecodeGenerator::addVar(
+    const Identifier& ident, ConstantMode constantMode, WatchMode watchMode, RegisterID*& r0)
 {
-    int index = m_nextGlobalIndex;
-    SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
-    pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
+    ASSERT(static_cast<size_t>(m_codeBlock->m_numVars) == m_calleeRegisters.size());
+    
+    ConcurrentJITLocker locker(symbolTable().m_lock);
+    int index = virtualRegisterForLocal(m_calleeRegisters.size()).offset();
+    SymbolTableEntry newEntry(index, constantMode == IsConstant ? ReadOnly : 0);
+    SymbolTable::Map::AddResult result = symbolTable().add(locker, ident.impl(), newEntry);
 
-    if (!result.second)
-        index = result.first->second.getIndex();
-    else {
-        --m_nextGlobalIndex;
-        m_globals.append(index + m_globalVarStorageOffset);
+    if (!result.isNewEntry) {
+        r0 = &registerFor(result.iterator->value.getIndex());
+        return false;
     }
-
-    r0 = &registerFor(index);
-    return result.second;
+    
+    if (watchMode == IsWatchable) {
+        while (m_watchableVariables.size() < static_cast<size_t>(m_codeBlock->m_numVars))
+            m_watchableVariables.append(Identifier());
+        m_watchableVariables.append(ident);
+    }
+    
+    r0 = addVar();
+    
+    ASSERT(watchMode == NotWatchable || static_cast<size_t>(m_codeBlock->m_numVars) == m_watchableVariables.size());
+    
+    return true;
 }
 
 void BytecodeGenerator::preserveLastVar()
@@ -202,251 +157,358 @@ void BytecodeGenerator::preserveLastVar()
         m_lastVar = &m_calleeRegisters.last();
 }
 
-BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock)
-    : m_shouldEmitDebugHooks(!!debugger)
-    , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
-    , m_scopeChain(&scopeChain)
-    , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+    : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+    , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+    , m_symbolTable(0)
     , m_scopeNode(programNode)
-    , m_codeBlock(codeBlock)
-    , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
+    , m_codeBlock(vm, codeBlock)
+    , m_thisRegister(CallFrame::thisArgumentOffset())
+    , m_activationRegister(0)
+    , m_emptyValueRegister(0)
+    , m_globalObjectRegister(0)
     , m_finallyDepth(0)
-    , m_dynamicScopeDepth(0)
-    , m_baseScopeDepth(0)
+    , m_localScopeDepth(0)
     , m_codeType(GlobalCode)
-    , m_nextGlobalIndex(-1)
     , m_nextConstantOffset(0)
     , m_globalConstantIndex(0)
-    , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+    , m_firstLazyFunction(0)
+    , m_lastLazyFunction(0)
+    , m_staticPropertyAnalyzer(&m_instructions)
+    , m_vm(&vm)
     , m_lastOpcodeID(op_end)
-    , m_emitNodeDepth(0)
-    , m_regeneratingForExceptionInfo(false)
-    , m_codeBlockBeingRegeneratedFrom(0)
+#ifndef NDEBUG
+    , m_lastOpcodePosition(0)
+#endif
+    , m_usesExceptions(false)
+    , m_expressionTooDeep(false)
+    , m_isBuiltinFunction(false)
 {
-    if (m_shouldEmitDebugHooks)
-        m_codeBlock->setNeedsFullScopeChain(true);
+    m_codeBlock->setNumParameters(1); // Allocate space for "this"
 
     emitOpcode(op_enter);
-    codeBlock->setGlobalData(m_globalData);
-
-    // FIXME: Move code that modifies the global object to Interpreter::execute.
-    
-    m_codeBlock->m_numParameters = 1; // Allocate space for "this"
-
-    JSGlobalObject* globalObject = scopeChain.globalObject();
-    ExecState* exec = globalObject->globalExec();
-    RegisterFile* registerFile = &exec->globalData().interpreter->registerFile();
-    
-    // Shift register indexes in generated code to elide registers allocated by intermediate stack frames.
-    m_globalVarStorageOffset = -RegisterFile::CallFrameHeaderSize - m_codeBlock->m_numParameters - registerFile->size();
-
-    // Add previously defined symbols to bookkeeping.
-    m_globals.grow(symbolTable->size());
-    SymbolTable::iterator end = symbolTable->end();
-    for (SymbolTable::iterator it = symbolTable->begin(); it != end; ++it)
-        registerFor(it->second.getIndex()).setIndex(it->second.getIndex() + m_globalVarStorageOffset);
-        
-    BatchedTransitionOptimizer optimizer(globalObject);
 
     const VarStack& varStack = programNode->varStack();
     const FunctionStack& functionStack = programNode->functionStack();
-    bool canOptimizeNewGlobals = symbolTable->size() + functionStack.size() + varStack.size() < registerFile->maxGlobals();
-    if (canOptimizeNewGlobals) {
-        // Shift new symbols so they get stored prior to existing symbols.
-        m_nextGlobalIndex -= symbolTable->size();
-
-        for (size_t i = 0; i < functionStack.size(); ++i) {
-            FunctionBodyNode* function = functionStack[i];
-            globalObject->removeDirect(function->ident()); // Make sure our new function is not shadowed by an old property.
-            emitNewFunction(addGlobalVar(function->ident(), false), function);
-        }
 
-        Vector<RegisterID*, 32> newVars;
-        for (size_t i = 0; i < varStack.size(); ++i)
-            if (!globalObject->hasProperty(exec, *varStack[i].first))
-                newVars.append(addGlobalVar(*varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant));
-
-        preserveLastVar();
+    for (size_t i = 0; i < functionStack.size(); ++i) {
+        FunctionBodyNode* function = functionStack[i];
+        UnlinkedFunctionExecutable* unlinkedFunction = makeFunction(function);
+        codeBlock->addFunctionDeclaration(*m_vm, function->ident(), unlinkedFunction);
+    }
 
-        for (size_t i = 0; i < newVars.size(); ++i)
-            emitLoad(newVars[i], jsUndefined());
-    } else {
-        for (size_t i = 0; i < functionStack.size(); ++i) {
-            FunctionBodyNode* function = functionStack[i];
-            globalObject->putWithAttributes(exec, function->ident(), new (exec) JSFunction(exec, makeFunction(exec, function), scopeChain.node()), DontDelete);
-        }
-        for (size_t i = 0; i < varStack.size(); ++i) {
-            if (globalObject->hasProperty(exec, *varStack[i].first))
-                continue;
-            int attributes = DontDelete;
-            if (varStack[i].second & DeclarationStacks::IsConstant)
-                attributes |= ReadOnly;
-            globalObject->putWithAttributes(exec, *varStack[i].first, jsUndefined(), attributes);
-        }
+    for (size_t i = 0; i < varStack.size(); ++i)
+        codeBlock->addVariableDeclaration(varStack[i].first, !!(varStack[i].second & DeclarationStacks::IsConstant));
 
-        preserveLastVar();
-    }
 }
 
-BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock)
-    : m_shouldEmitDebugHooks(!!debugger)
-    , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
-    , m_scopeChain(&scopeChain)
-    , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+    : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+    , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+    , m_symbolTable(codeBlock->symbolTable())
     , m_scopeNode(functionBody)
-    , m_codeBlock(codeBlock)
+    , m_codeBlock(vm, codeBlock)
+    , m_activationRegister(0)
+    , m_emptyValueRegister(0)
+    , m_globalObjectRegister(0)
     , m_finallyDepth(0)
-    , m_dynamicScopeDepth(0)
-    , m_baseScopeDepth(0)
+    , m_localScopeDepth(0)
     , m_codeType(FunctionCode)
     , m_nextConstantOffset(0)
     , m_globalConstantIndex(0)
-    , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+    , m_firstLazyFunction(0)
+    , m_lastLazyFunction(0)
+    , m_staticPropertyAnalyzer(&m_instructions)
+    , m_vm(&vm)
     , m_lastOpcodeID(op_end)
-    , m_emitNodeDepth(0)
-    , m_regeneratingForExceptionInfo(false)
-    , m_codeBlockBeingRegeneratedFrom(0)
+#ifndef NDEBUG
+    , m_lastOpcodePosition(0)
+#endif
+    , m_usesExceptions(false)
+    , m_expressionTooDeep(false)
+    , m_isBuiltinFunction(codeBlock->isBuiltinFunction())
 {
-    if (m_shouldEmitDebugHooks)
-        m_codeBlock->setNeedsFullScopeChain(true);
+    if (m_isBuiltinFunction)
+        m_shouldEmitDebugHooks = false;
 
-    codeBlock->setGlobalData(m_globalData);
+    m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
+    Vector<Identifier> boundParameterProperties;
+    FunctionParameters& parameters = *functionBody->parameters();
+    for (size_t i = 0; i < parameters.size(); i++) {
+        auto pattern = parameters.at(i);
+        if (pattern->isBindingNode())
+            continue;
+        pattern->collectBoundIdentifiers(boundParameterProperties);
+        continue;
+    }
+    m_symbolTable->setParameterCountIncludingThis(functionBody->parameters()->size() + 1);
 
-    bool usesArguments = functionBody->usesArguments();
-    codeBlock->setUsesArguments(usesArguments);
-    if (usesArguments) {
-        m_argumentsRegister.setIndex(RegisterFile::OptionalCalleeArguments);
-        addVar(propertyNames().arguments, false);
+    emitOpcode(op_enter);
+    if (m_codeBlock->needsFullScopeChain() || m_shouldEmitDebugHooks) {
+        m_activationRegister = addVar();
+        emitInitLazyRegister(m_activationRegister);
+        m_codeBlock->setActivationRegister(m_activationRegister->virtualRegister());
     }
 
-    if (m_codeBlock->needsFullScopeChain()) {
-        ++m_codeBlock->m_numVars;
-        m_activationRegisterIndex = newRegister()->index();
-        emitOpcode(op_enter_with_activation);
-        instructions().append(m_activationRegisterIndex);
-    } else
-        emitOpcode(op_enter);
+    m_symbolTable->setCaptureStart(virtualRegisterForLocal(m_codeBlock->m_numVars).offset());
 
-    if (usesArguments) {
-        emitOpcode(op_init_arguments);
+    if (functionBody->usesArguments() || codeBlock->usesEval()) { // May reify arguments object.
+        RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code.
+        RegisterID* argumentsRegister = addVar(propertyNames().arguments, IsVariable, NotWatchable); // Can be changed by assigning to 'arguments'.
 
-        // The debugger currently retrieves the arguments object from an activation rather than pulling
-        // it from a call frame.  In the long-term it should stop doing that (<rdar://problem/6911886>),
-        // but for now we force eager creation of the arguments object when debugging.
-        if (m_shouldEmitDebugHooks)
+        // We can save a little space by hard-coding the knowledge that the two
+        // 'arguments' values are stored in consecutive registers, and storing
+        // only the index of the assignable one.
+        codeBlock->setArgumentsRegister(argumentsRegister->virtualRegister());
+        ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->virtualRegister() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister()));
+
+        emitInitLazyRegister(argumentsRegister);
+        emitInitLazyRegister(unmodifiedArgumentsRegister);
+        
+        if (shouldTearOffArgumentsEagerly()) {
             emitOpcode(op_create_arguments);
+            instructions().append(argumentsRegister->index());
+        }
     }
 
+    bool shouldCaptureAllTheThings = m_shouldEmitDebugHooks || codeBlock->usesEval();
+
+    bool capturesAnyArgumentByName = false;
+    Vector<RegisterID*, 0, UnsafeVectorOverflow> capturedArguments;
+    if (functionBody->hasCapturedVariables() || shouldCaptureAllTheThings) {
+        FunctionParameters& parameters = *functionBody->parameters();
+        capturedArguments.resize(parameters.size());
+        for (size_t i = 0; i < parameters.size(); ++i) {
+            capturedArguments[i] = 0;
+            auto pattern = parameters.at(i);
+            if (!pattern->isBindingNode())
+                continue;
+            const Identifier& ident = static_cast<const BindingNode*>(pattern)->boundProperty();
+            if (!functionBody->captures(ident) && !shouldCaptureAllTheThings)
+                continue;
+            capturesAnyArgumentByName = true;
+            capturedArguments[i] = addVar();
+        }
+    }
+
+    if (capturesAnyArgumentByName && !shouldTearOffArgumentsEagerly()) {
+        size_t parameterCount = m_symbolTable->parameterCount();
+        auto slowArguments = std::make_unique<SlowArgument[]>(parameterCount);
+        for (size_t i = 0; i < parameterCount; ++i) {
+            if (!capturedArguments[i]) {
+                ASSERT(slowArguments[i].status == SlowArgument::Normal);
+                slowArguments[i].index = CallFrame::argumentOffset(i);
+                continue;
+            }
+            slowArguments[i].status = SlowArgument::Captured;
+            slowArguments[i].index = capturedArguments[i]->index();
+        }
+        m_symbolTable->setSlowArguments(WTF::move(slowArguments));
+    }
+
+    RegisterID* calleeRegister = resolveCallee(functionBody); // May push to the scope chain and/or add a captured var.
+
     const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack();
+    const DeclarationStacks::VarStack& varStack = functionBody->varStack();
+    IdentifierSet test;
+
+    // Captured variables and functions go first so that activations don't have
+    // to step over the non-captured locals to mark them.
+    if (functionBody->hasCapturedVariables()) {
+        for (size_t i = 0; i < boundParameterProperties.size(); i++) {
+            const Identifier& ident = boundParameterProperties[i];
+            if (functionBody->captures(ident))
+                addVar(ident, IsVariable, IsWatchable);
+        }
+        for (size_t i = 0; i < functionStack.size(); ++i) {
+            FunctionBodyNode* function = functionStack[i];
+            const Identifier& ident = function->ident();
+            if (functionBody->captures(ident)) {
+                m_functions.add(ident.impl());
+                emitNewFunction(addVar(ident, IsVariable, IsWatchable), IsCaptured, function);
+            }
+        }
+        for (size_t i = 0; i < varStack.size(); ++i) {
+            const Identifier& ident = varStack[i].first;
+            if (functionBody->captures(ident))
+                addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, IsWatchable);
+        }
+    }
+
+    m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset());
+
+    bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks;
+    m_firstLazyFunction = codeBlock->m_numVars;
     for (size_t i = 0; i < functionStack.size(); ++i) {
         FunctionBodyNode* function = functionStack[i];
         const Identifier& ident = function->ident();
-        m_functions.add(ident.ustring().rep());
-        emitNewFunction(addVar(ident, false), function);
+        if (!functionBody->captures(ident)) {
+            m_functions.add(ident.impl());
+            RefPtr<RegisterID> reg = addVar(ident, IsVariable, NotWatchable);
+            // Don't lazily create functions that override the name 'arguments'
+            // as this would complicate lazy instantiation of actual arguments.
+            if (!canLazilyCreateFunctions || ident == propertyNames().arguments)
+                emitNewFunction(reg.get(), NotCaptured, function);
+            else {
+                emitInitLazyRegister(reg.get());
+                m_lazyFunctions.set(reg->virtualRegister().toLocal(), function);
+            }
+        }
+    }
+    m_lastLazyFunction = canLazilyCreateFunctions ? codeBlock->m_numVars : m_firstLazyFunction;
+    for (size_t i = 0; i < boundParameterProperties.size(); i++) {
+        const Identifier& ident = boundParameterProperties[i];
+        if (!functionBody->captures(ident))
+            addVar(ident, IsVariable, IsWatchable);
+    }
+    for (size_t i = 0; i < varStack.size(); ++i) {
+        const Identifier& ident = varStack[i].first;
+        if (!functionBody->captures(ident))
+            addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, NotWatchable);
     }
 
-    const DeclarationStacks::VarStack& varStack = functionBody->varStack();
-    for (size_t i = 0; i < varStack.size(); ++i)
-        addVar(*varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant);
+    if (shouldCaptureAllTheThings)
+        m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset());
 
-    FunctionParameters& parameters = *functionBody->parameters();
-    size_t parameterCount = parameters.size();
-    m_nextParameterIndex = -RegisterFile::CallFrameHeaderSize - parameterCount - 1;
-    m_parameters.grow(1 + parameterCount); // reserve space for "this"
+    if (m_symbolTable->captureCount())
+        emitOpcode(op_touch_entry);
+    
+    m_parameters.grow(parameters.size() + 1); // reserve space for "this"
 
     // Add "this" as a parameter
-    m_thisRegister.setIndex(m_nextParameterIndex);
-    ++m_nextParameterIndex;
-    ++m_codeBlock->m_numParameters;
-
-    if (functionBody->usesThis() || m_shouldEmitDebugHooks) {
-        emitOpcode(op_convert_this);
-        instructions().append(m_thisRegister.index());
+    int nextParameterIndex = CallFrame::thisArgumentOffset();
+    m_thisRegister.setIndex(nextParameterIndex++);
+    m_codeBlock->addParameter();
+    for (size_t i = 0; i < parameters.size(); ++i, ++nextParameterIndex) {
+        int index = nextParameterIndex;
+        auto pattern = parameters.at(i);
+        if (!pattern->isBindingNode()) {
+            m_codeBlock->addParameter();
+            RegisterID& parameter = registerFor(index);
+            parameter.setIndex(index);
+            m_deconstructedParameters.append(std::make_pair(&parameter, pattern));
+            continue;
+        }
+        auto simpleParameter = static_cast<const BindingNode*>(pattern);
+        if (capturedArguments.size() && capturedArguments[i]) {
+            ASSERT((functionBody->hasCapturedVariables() && functionBody->captures(simpleParameter->boundProperty())) || shouldCaptureAllTheThings);
+            index = capturedArguments[i]->index();
+            RegisterID original(nextParameterIndex);
+            emitMove(capturedArguments[i], &original);
+        }
+        addParameter(simpleParameter->boundProperty(), index);
     }
-    
-    for (size_t i = 0; i < parameterCount; ++i)
-        addParameter(parameters[i]);
-
     preserveLastVar();
+
+    // We declare the callee's name last because it should lose to a var, function, and/or parameter declaration.
+    addCallee(functionBody, calleeRegister);
+
+    if (isConstructor()) {
+        emitCreateThis(&m_thisRegister);
+    } else if (functionBody->usesThis() || codeBlock->usesEval()) {
+        m_codeBlock->addPropertyAccessInstruction(instructions().size());
+        emitOpcode(op_to_this);
+        instructions().append(kill(&m_thisRegister));
+        instructions().append(0);
+    }
 }
 
-BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock)
-    : m_shouldEmitDebugHooks(!!debugger)
-    , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
-    , m_scopeChain(&scopeChain)
-    , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+    : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+    , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+    , m_symbolTable(codeBlock->symbolTable())
     , m_scopeNode(evalNode)
-    , m_codeBlock(codeBlock)
-    , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
+    , m_codeBlock(vm, codeBlock)
+    , m_thisRegister(CallFrame::thisArgumentOffset())
+    , m_activationRegister(0)
+    , m_emptyValueRegister(0)
+    , m_globalObjectRegister(0)
     , m_finallyDepth(0)
-    , m_dynamicScopeDepth(0)
-    , m_baseScopeDepth(codeBlock->baseScopeDepth())
+    , m_localScopeDepth(0)
     , m_codeType(EvalCode)
     , m_nextConstantOffset(0)
     , m_globalConstantIndex(0)
-    , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+    , m_firstLazyFunction(0)
+    , m_lastLazyFunction(0)
+    , m_staticPropertyAnalyzer(&m_instructions)
+    , m_vm(&vm)
     , m_lastOpcodeID(op_end)
-    , m_emitNodeDepth(0)
-    , m_regeneratingForExceptionInfo(false)
-    , m_codeBlockBeingRegeneratedFrom(0)
+#ifndef NDEBUG
+    , m_lastOpcodePosition(0)
+#endif
+    , m_usesExceptions(false)
+    , m_expressionTooDeep(false)
+    , m_isBuiltinFunction(false)
 {
-    if (m_shouldEmitDebugHooks || m_baseScopeDepth)
-        m_codeBlock->setNeedsFullScopeChain(true);
+    m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
+    m_codeBlock->setNumParameters(1);
 
     emitOpcode(op_enter);
-    codeBlock->setGlobalData(m_globalData);
-    m_codeBlock->m_numParameters = 1; // Allocate space for "this"
 
     const DeclarationStacks::FunctionStack& functionStack = evalNode->functionStack();
     for (size_t i = 0; i < functionStack.size(); ++i)
-        m_codeBlock->addFunctionDecl(makeFunction(m_globalData, functionStack[i]));
+        m_codeBlock->addFunctionDecl(makeFunction(functionStack[i]));
 
     const DeclarationStacks::VarStack& varStack = evalNode->varStack();
     unsigned numVariables = varStack.size();
-    Vector<Identifier> variables;
+    Vector<Identifier, 0, UnsafeVectorOverflow> variables;
     variables.reserveCapacity(numVariables);
-    for (size_t i = 0; i < numVariables; ++i)
-        variables.append(*varStack[i].first);
+    for (size_t i = 0; i < numVariables; ++i) {
+        ASSERT(varStack[i].first.impl()->isAtomic());
+        variables.append(varStack[i].first);
+    }
     codeBlock->adoptVariables(variables);
-
     preserveLastVar();
 }
 
-RegisterID* BytecodeGenerator::addParameter(const Identifier& ident)
+BytecodeGenerator::~BytecodeGenerator()
 {
-    // Parameters overwrite var declarations, but not function declarations.
-    RegisterID* result = 0;
-    UString::Rep* rep = ident.ustring().rep();
-    if (!m_functions.contains(rep)) {
-        symbolTable().set(rep, m_nextParameterIndex);
-        RegisterID& parameter = registerFor(m_nextParameterIndex);
-        parameter.setIndex(m_nextParameterIndex);
-        result = &parameter;
-    }
-
-    // To maintain the calling convention, we have to allocate unique space for
-    // each parameter, even if the parameter doesn't make it into the symbol table.
-    ++m_nextParameterIndex;
-    ++m_codeBlock->m_numParameters;
-    return result;
 }
 
-RegisterID* BytecodeGenerator::registerFor(const Identifier& ident)
+RegisterID* BytecodeGenerator::emitInitLazyRegister(RegisterID* reg)
 {
-    if (ident == propertyNames().thisIdentifier)
-        return &m_thisRegister;
+    emitOpcode(op_init_lazy_reg);
+    instructions().append(reg->index());
+    ASSERT(!hasWatchableVariable(reg->index()));
+    return reg;
+}
 
-    if (!shouldOptimizeLocals())
+RegisterID* BytecodeGenerator::resolveCallee(FunctionBodyNode* functionBodyNode)
+{
+    if (!functionNameIsInScope(functionBodyNode->ident(), functionBodyNode->functionMode()))
         return 0;
 
-    SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
-    if (entry.isNull())
+    if (functionNameScopeIsDynamic(m_codeBlock->usesEval(), m_codeBlock->isStrictMode()))
         return 0;
 
-    if (ident == propertyNames().arguments)
-        createArgumentsIfNecessary();
+    m_calleeRegister.setIndex(JSStack::Callee);
+    if (functionBodyNode->captures(functionBodyNode->ident()))
+        return emitMove(addVar(), IsCaptured, &m_calleeRegister);
 
-    return &registerFor(entry.getIndex());
+    return &m_calleeRegister;
+}
+
+void BytecodeGenerator::addCallee(FunctionBodyNode* functionBodyNode, RegisterID* calleeRegister)
+{
+    if (!calleeRegister)
+        return;
+
+    symbolTable().add(functionBodyNode->ident().impl(), SymbolTableEntry(calleeRegister->index(), ReadOnly));
+}
+
+void BytecodeGenerator::addParameter(const Identifier& ident, int parameterIndex)
+{
+    // Parameters overwrite var declarations, but not function declarations.
+    StringImpl* rep = ident.impl();
+    if (!m_functions.contains(rep)) {
+        symbolTable().set(rep, parameterIndex);
+        RegisterID& parameter = registerFor(parameterIndex);
+        parameter.setIndex(parameterIndex);
+    }
+
+    // To maintain the calling convention, we have to allocate unique space for
+    // each parameter, even if the parameter doesn't make it into the symbol table.
+    m_codeBlock->addParameter();
 }
 
 bool BytecodeGenerator::willResolveToArguments(const Identifier& ident)
@@ -457,10 +519,10 @@ bool BytecodeGenerator::willResolveToArguments(const Identifier& ident)
     if (!shouldOptimizeLocals())
         return false;
     
-    SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
+    SymbolTableEntry entry = symbolTable().get(ident.impl());
     if (entry.isNull())
         return false;
-    
+
     if (m_codeBlock->usesArguments() && m_codeType == FunctionCode)
         return true;
     
@@ -471,40 +533,30 @@ RegisterID* BytecodeGenerator::uncheckedRegisterForArguments()
 {
     ASSERT(willResolveToArguments(propertyNames().arguments));
 
-    SymbolTableEntry entry = symbolTable().get(propertyNames().arguments.ustring().rep());
+    SymbolTableEntry entry = symbolTable().get(propertyNames().arguments.impl());
     ASSERT(!entry.isNull());
     return &registerFor(entry.getIndex());
 }
 
-RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
+RegisterID* BytecodeGenerator::createLazyRegisterIfNecessary(RegisterID* reg)
 {
-    if (m_codeType == EvalCode)
-        return 0;
+    if (!reg->virtualRegister().isLocal())
+        return reg;
 
-    SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
-    if (entry.isNull())
-        return 0;
+    int localVariableNumber = reg->virtualRegister().toLocal();
 
-    return &registerFor(entry.getIndex());
-}
-
-bool BytecodeGenerator::isLocal(const Identifier& ident)
-{
-    if (ident == propertyNames().thisIdentifier)
-        return true;
-    
-    return shouldOptimizeLocals() && symbolTable().contains(ident.ustring().rep());
-}
-
-bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
-{
-    return symbolTable().get(ident.ustring().rep()).isReadOnly();
+    if (m_lastLazyFunction <= localVariableNumber || localVariableNumber < m_firstLazyFunction)
+        return reg;
+    emitLazyNewFunction(reg, m_lazyFunctions.get(localVariableNumber));
+    return reg;
 }
 
 RegisterID* BytecodeGenerator::newRegister()
 {
-    m_calleeRegisters.append(m_calleeRegisters.size());
-    m_codeBlock->m_numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+    m_calleeRegisters.append(virtualRegisterForLocal(m_calleeRegisters.size()));
+    int numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+    numCalleeRegisters = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numCalleeRegisters);
+    m_codeBlock->m_numCalleeRegisters = numCalleeRegisters;
     return &m_calleeRegisters.last();
 }
 
@@ -519,15 +571,7 @@ RegisterID* BytecodeGenerator::newTemporary()
     return result;
 }
 
-RegisterID* BytecodeGenerator::highestUsedRegister()
-{
-    size_t count = m_codeBlock->m_numCalleeRegisters;
-    while (m_calleeRegisters.size() < count)
-        newRegister();
-    return &m_calleeRegisters.last();
-}
-
-PassRefPtr<LabelScope> BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
+LabelScopePtr BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
 {
     // Reclaim free label scopes.
     while (m_labelScopes.size() && !m_labelScopes.last().refCount())
@@ -536,7 +580,7 @@ PassRefPtr<LabelScope> BytecodeGenerator::newLabelScope(LabelScope::Type type, c
     // Allocate new label scope.
     LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : PassRefPtr<Label>()); // Only loops have continue targets.
     m_labelScopes.append(scope);
-    return &m_labelScopes.last();
+    return LabelScopePtr(m_labelScopes, m_labelScopes.size() - 1);
 }
 
 PassRefPtr<Label> BytecodeGenerator::newLabel()
@@ -546,7 +590,7 @@ PassRefPtr<Label> BytecodeGenerator::newLabel()
         m_labels.removeLast();
 
     // Allocate new label ID.
-    m_labels.append(m_codeBlock);
+    m_labels.append(this);
     return &m_labels.last();
 }
 
@@ -573,10 +617,42 @@ PassRefPtr<Label> BytecodeGenerator::emitLabel(Label* l0)
 
 void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)
 {
-    instructions().append(globalData()->interpreter->getOpcode(opcodeID));
+#ifndef NDEBUG
+    size_t opcodePosition = instructions().size();
+    ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end);
+    m_lastOpcodePosition = opcodePosition;
+#endif
+    instructions().append(opcodeID);
     m_lastOpcodeID = opcodeID;
 }
 
+UnlinkedArrayProfile BytecodeGenerator::newArrayProfile()
+{
+    return m_codeBlock->addArrayProfile();
+}
+
+UnlinkedArrayAllocationProfile BytecodeGenerator::newArrayAllocationProfile()
+{
+    return m_codeBlock->addArrayAllocationProfile();
+}
+
+UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile()
+{
+    return m_codeBlock->addObjectAllocationProfile();
+}
+
+UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)
+{
+    UnlinkedValueProfile result = m_codeBlock->addValueProfile();
+    emitOpcode(opcodeID);
+    return result;
+}
+
+void BytecodeGenerator::emitLoopHint()
+{
+    emitOpcode(op_loop_hint);
+}
+
 void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index)
 {
     ASSERT(instructions().size() >= 4);
@@ -598,18 +674,20 @@ void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp()
 {
     ASSERT(instructions().size() >= 4);
     instructions().shrink(instructions().size() - 4);
+    m_lastOpcodeID = op_end;
 }
 
 void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp()
 {
     ASSERT(instructions().size() >= 3);
     instructions().shrink(instructions().size() - 3);
+    m_lastOpcodeID = op_end;
 }
 
 PassRefPtr<Label> BytecodeGenerator::emitJump(Label* target)
 {
     size_t begin = instructions().size();
-    emitOpcode(target->isForward() ? op_jmp : op_loop);
+    emitOpcode(op_jmp);
     instructions().append(target->bind(begin, instructions().size()));
     return target;
 }
@@ -627,13 +705,13 @@ PassRefPtr<Label> BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label* tar
             rewindBinaryOp();
 
             size_t begin = instructions().size();
-            emitOpcode(target->isForward() ? op_jless : op_loop_if_less);
+            emitOpcode(op_jless);
             instructions().append(src1Index);
             instructions().append(src2Index);
             instructions().append(target->bind(begin, instructions().size()));
             return target;
         }
-    } else if (m_lastOpcodeID == op_lesseq && !target->isForward()) {
+    } else if (m_lastOpcodeID == op_lesseq) {
         int dstIndex;
         int src1Index;
         int src2Index;
@@ -644,7 +722,41 @@ PassRefPtr<Label> BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label* tar
             rewindBinaryOp();
 
             size_t begin = instructions().size();
-            emitOpcode(op_loop_if_lesseq);
+            emitOpcode(op_jlesseq);
+            instructions().append(src1Index);
+            instructions().append(src2Index);
+            instructions().append(target->bind(begin, instructions().size()));
+            return target;
+        }
+    } else if (m_lastOpcodeID == op_greater) {
+        int dstIndex;
+        int src1Index;
+        int src2Index;
+
+        retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+            rewindBinaryOp();
+
+            size_t begin = instructions().size();
+            emitOpcode(op_jgreater);
+            instructions().append(src1Index);
+            instructions().append(src2Index);
+            instructions().append(target->bind(begin, instructions().size()));
+            return target;
+        }
+    } else if (m_lastOpcodeID == op_greatereq) {
+        int dstIndex;
+        int src1Index;
+        int src2Index;
+
+        retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+            rewindBinaryOp();
+
+            size_t begin = instructions().size();
+            emitOpcode(op_jgreatereq);
             instructions().append(src1Index);
             instructions().append(src2Index);
             instructions().append(target->bind(begin, instructions().size()));
@@ -684,7 +796,7 @@ PassRefPtr<Label> BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label* tar
 
     size_t begin = instructions().size();
 
-    emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
+    emitOpcode(op_jtrue);
     instructions().append(cond->index());
     instructions().append(target->bind(begin, instructions().size()));
     return target;
@@ -726,6 +838,40 @@ PassRefPtr<Label> BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label* ta
             instructions().append(target->bind(begin, instructions().size()));
             return target;
         }
+    } else if (m_lastOpcodeID == op_greater && target->isForward()) {
+        int dstIndex;
+        int src1Index;
+        int src2Index;
+
+        retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+            rewindBinaryOp();
+
+            size_t begin = instructions().size();
+            emitOpcode(op_jngreater);
+            instructions().append(src1Index);
+            instructions().append(src2Index);
+            instructions().append(target->bind(begin, instructions().size()));
+            return target;
+        }
+    } else if (m_lastOpcodeID == op_greatereq && target->isForward()) {
+        int dstIndex;
+        int src1Index;
+        int src2Index;
+
+        retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+            rewindBinaryOp();
+
+            size_t begin = instructions().size();
+            emitOpcode(op_jngreatereq);
+            instructions().append(src1Index);
+            instructions().append(src2Index);
+            instructions().append(target->bind(begin, instructions().size()));
+            return target;
+        }
     } else if (m_lastOpcodeID == op_not) {
         int dstIndex;
         int srcIndex;
@@ -736,7 +882,7 @@ PassRefPtr<Label> BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label* ta
             rewindUnaryOp();
 
             size_t begin = instructions().size();
-            emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
+            emitOpcode(op_jtrue);
             instructions().append(srcIndex);
             instructions().append(target->bind(begin, instructions().size()));
             return target;
@@ -774,7 +920,7 @@ PassRefPtr<Label> BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label* ta
     }
 
     size_t begin = instructions().size();
-    emitOpcode(target->isForward() ? op_jfalse : op_loop_if_false);
+    emitOpcode(op_jfalse);
     instructions().append(cond->index());
     instructions().append(target->bind(begin, instructions().size()));
     return target;
@@ -786,7 +932,7 @@ PassRefPtr<Label> BytecodeGenerator::emitJumpIfNotFunctionCall(RegisterID* cond,
 
     emitOpcode(op_jneq_ptr);
     instructions().append(cond->index());
-    instructions().append(m_scopeChain->globalObject()->d()->callFunction);
+    instructions().append(Special::CallFunction);
     instructions().append(target->bind(begin, instructions().size()));
     return target;
 }
@@ -797,33 +943,48 @@ PassRefPtr<Label> BytecodeGenerator::emitJumpIfNotFunctionApply(RegisterID* cond
 
     emitOpcode(op_jneq_ptr);
     instructions().append(cond->index());
-    instructions().append(m_scopeChain->globalObject()->d()->applyFunction);
+    instructions().append(Special::ApplyFunction);
     instructions().append(target->bind(begin, instructions().size()));
     return target;
 }
 
 unsigned BytecodeGenerator::addConstant(const Identifier& ident)
 {
-    UString::Rep* rep = ident.ustring().rep();
-    pair<IdentifierMap::iterator, bool> result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
-    if (result.second) // new entry
-        m_codeBlock->addIdentifier(Identifier(m_globalData, rep));
+    StringImpl* rep = ident.impl();
+    IdentifierMap::AddResult result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
+    if (result.isNewEntry)
+        m_codeBlock->addIdentifier(ident);
 
-    return result.first->second;
+    return result.iterator->value;
+}
+
+// We can't hash JSValue(), so we use a dedicated data member to cache it.
+RegisterID* BytecodeGenerator::addConstantEmptyValue()
+{
+    if (!m_emptyValueRegister) {
+        int index = m_nextConstantOffset;
+        m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+        ++m_nextConstantOffset;
+        m_codeBlock->addConstant(JSValue());
+        m_emptyValueRegister = &m_constantPoolRegisters[index];
+    }
+
+    return m_emptyValueRegister;
 }
 
 RegisterID* BytecodeGenerator::addConstantValue(JSValue v)
 {
-    int index = m_nextConstantOffset;
+    if (!v)
+        return addConstantEmptyValue();
 
-    pair<JSValueMap::iterator, bool> result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset);
-    if (result.second) {
+    int index = m_nextConstantOffset;
+    JSValueMap::AddResult result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset);
+    if (result.isNewEntry) {
         m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
         ++m_nextConstantOffset;
-        m_codeBlock->addConstantRegister(JSValue(v));
+        m_codeBlock->addConstant(v);
     } else
-        index = result.first->second;
-
+        index = result.iterator->value;
     return &m_constantPoolRegisters[index];
 }
 
@@ -832,14 +993,23 @@ unsigned BytecodeGenerator::addRegExp(RegExp* r)
     return m_codeBlock->addRegExp(r);
 }
 
-RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
+RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, CaptureMode captureMode, RegisterID* src)
 {
-    emitOpcode(op_mov);
+    m_staticPropertyAnalyzer.mov(dst->index(), src->index());
+
+    emitOpcode(captureMode == IsCaptured ? op_captured_mov : op_mov);
     instructions().append(dst->index());
     instructions().append(src->index());
+    if (captureMode == IsCaptured)
+        instructions().append(watchableVariable(dst->index()));
     return dst;
 }
 
+RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
+{
+    return emitMove(dst, captureMode(dst->index()), src);
+}
+
 RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src)
 {
     emitOpcode(opcodeID);
@@ -848,36 +1018,20 @@ RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, R
     return dst;
 }
 
-RegisterID* BytecodeGenerator::emitPreInc(RegisterID* srcDst)
+RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst)
 {
-    emitOpcode(op_pre_inc);
+    emitOpcode(op_inc);
     instructions().append(srcDst->index());
     return srcDst;
 }
 
-RegisterID* BytecodeGenerator::emitPreDec(RegisterID* srcDst)
+RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst)
 {
-    emitOpcode(op_pre_dec);
+    emitOpcode(op_dec);
     instructions().append(srcDst->index());
     return srcDst;
 }
 
-RegisterID* BytecodeGenerator::emitPostInc(RegisterID* dst, RegisterID* srcDst)
-{
-    emitOpcode(op_post_inc);
-    instructions().append(dst->index());
-    instructions().append(srcDst->index());
-    return dst;
-}
-
-RegisterID* BytecodeGenerator::emitPostDec(RegisterID* dst, RegisterID* srcDst)
-{
-    emitOpcode(op_post_dec);
-    instructions().append(dst->index());
-    instructions().append(srcDst->index());
-    return dst;
-}
-
 RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types)
 {
     emitOpcode(opcodeID);
@@ -903,8 +1057,8 @@ RegisterID* BytecodeGenerator::emitEqualityOp(OpcodeID opcodeID, RegisterID* dst
         if (src1->index() == dstIndex
             && src1->isTemporary()
             && m_codeBlock->isConstantRegisterIndex(src2->index())
-            && m_codeBlock->constantRegister(src2->index()).jsValue().isString()) {
-            const UString& value = asString(m_codeBlock->constantRegister(src2->index()).jsValue())->tryGetValue();
+            && m_codeBlock->constantRegister(src2->index()).get().isString()) {
+            const String& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue();
             if (value == "undefined") {
                 rewindUnaryOp();
                 emitOpcode(op_is_undefined);
@@ -964,21 +1118,22 @@ RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, bool b)
 
 RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, double number)
 {
-    // FIXME: Our hash tables won't hold infinity, so we make a new JSNumberCell each time.
-    // Later we can do the extra work to handle that like the other cases.
-    if (number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
-        return emitLoad(dst, jsNumber(globalData(), number));
-    JSValue& valueInMap = m_numberMap.add(number, JSValue()).first->second;
+    // FIXME: Our hash tables won't hold infinity, so we make a new JSValue each time.
+    // Later we can do the extra work to handle that like the other cases.  They also don't
+    // work correctly with NaN as a key.
+    if (std::isnan(number) || number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
+        return emitLoad(dst, jsNumber(number));
+    JSValue& valueInMap = m_numberMap.add(number, JSValue()).iterator->value;
     if (!valueInMap)
-        valueInMap = jsNumber(globalData(), number);
+        valueInMap = jsNumber(number);
     return emitLoad(dst, valueInMap);
 }
 
 RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, const Identifier& identifier)
 {
-    JSString*& stringInMap = m_stringMap.add(identifier.ustring().rep(), 0).first->second;
+    JSString*& stringInMap = m_stringMap.add(identifier.impl(), nullptr).iterator->value;
     if (!stringInMap)
-        stringInMap = jsOwnedString(globalData(), identifier.ustring());
+        stringInMap = jsOwnedString(vm(), identifier.string());
     return emitLoad(dst, JSValue(stringInMap));
 }
 
@@ -990,282 +1145,225 @@ RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, JSValue v)
     return constantID;
 }
 
-bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& index, size_t& stackDepth, bool forWriting, JSObject*& globalObject)
+RegisterID* BytecodeGenerator::emitLoadGlobalObject(RegisterID* dst)
 {
-    // Cases where we cannot statically optimize the lookup.
-    if (property == propertyNames().arguments || !canOptimizeNonLocals()) {
-        stackDepth = 0;
-        index = missingSymbolMarker();
-
-        if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
-            ScopeChainIterator iter = m_scopeChain->begin();
-            globalObject = *iter;
-            ASSERT((++iter) == m_scopeChain->end());
-        }
-        return false;
+    if (!m_globalObjectRegister) {
+        int index = m_nextConstantOffset;
+        m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+        ++m_nextConstantOffset;
+        m_codeBlock->addConstant(JSValue());
+        m_globalObjectRegister = &m_constantPoolRegisters[index];
+        m_codeBlock->setGlobalObjectRegister(VirtualRegister(index));
     }
+    if (dst)
+        emitMove(dst, m_globalObjectRegister);
+    return m_globalObjectRegister;
+}
 
-    size_t depth = 0;
+bool BytecodeGenerator::isCaptured(int operand)
+{
+    return m_symbolTable && m_symbolTable->isCaptured(operand);
+}
+
+Local BytecodeGenerator::local(const Identifier& property)
+{
+    if (property == propertyNames().thisIdentifier)
+        return Local(thisRegister(), ReadOnly, NotCaptured);
     
-    ScopeChainIterator iter = m_scopeChain->begin();
-    ScopeChainIterator end = m_scopeChain->end();
-    for (; iter != end; ++iter, ++depth) {
-        JSObject* currentScope = *iter;
-        if (!currentScope->isVariableObject())
-            break;
-        JSVariableObject* currentVariableObject = static_cast<JSVariableObject*>(currentScope);
-        SymbolTableEntry entry = currentVariableObject->symbolTable().get(property.ustring().rep());
-
-        // Found the property
-        if (!entry.isNull()) {
-            if (entry.isReadOnly() && forWriting) {
-                stackDepth = 0;
-                index = missingSymbolMarker();
-                if (++iter == end)
-                    globalObject = currentVariableObject;
-                return false;
-            }
-            stackDepth = depth;
-            index = entry.getIndex();
-            if (++iter == end)
-                globalObject = currentVariableObject;
-            return true;
-        }
-        if (currentVariableObject->isDynamicScope())
-            break;
-    }
+    if (property == propertyNames().arguments)
+        createArgumentsIfNecessary();
 
-    // Can't locate the property but we're able to avoid a few lookups.
-    stackDepth = depth;
-    index = missingSymbolMarker();
-    JSObject* scope = *iter;
-    if (++iter == end)
-        globalObject = scope;
-    return true;
+    if (!shouldOptimizeLocals())
+        return Local();
+
+    SymbolTableEntry entry = symbolTable().get(property.impl());
+    if (entry.isNull())
+        return Local();
+
+    RegisterID* local = createLazyRegisterIfNecessary(&registerFor(entry.getIndex()));
+    return Local(local, entry.getAttributes(), captureMode(local->index()));
 }
 
-RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype)
-{ 
-    emitOpcode(op_instanceof);
+Local BytecodeGenerator::constLocal(const Identifier& property)
+{
+    if (m_codeType != FunctionCode)
+        return Local();
+
+    SymbolTableEntry entry = symbolTable().get(property.impl());
+    if (entry.isNull())
+        return Local();
+
+    RegisterID* local = createLazyRegisterIfNecessary(&registerFor(entry.getIndex()));
+    return Local(local, entry.getAttributes(), captureMode(local->index()));
+}
+
+void BytecodeGenerator::emitCheckHasInstance(RegisterID* dst, RegisterID* value, RegisterID* base, Label* target)
+{
+    size_t begin = instructions().size();
+    emitOpcode(op_check_has_instance);
     instructions().append(dst->index());
     instructions().append(value->index());
     instructions().append(base->index());
-    instructions().append(basePrototype->index());
-    return dst;
+    instructions().append(target->bind(begin, instructions().size()));
 }
 
-RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const Identifier& property)
+// Indicates the least upper bound of resolve type based on local scope. The bytecode linker
+// will start with this ResolveType and compute the least upper bound including intercepting scopes.
+ResolveType BytecodeGenerator::resolveType()
 {
-    size_t depth = 0;
-    int index = 0;
-    JSObject* globalObject = 0;
-    if (!findScopedProperty(property, index, depth, false, globalObject) && !globalObject) {
-        // We can't optimise at all :-(
-        emitOpcode(op_resolve);
-        instructions().append(dst->index());
-        instructions().append(addConstant(property));
-        return dst;
-    }
-
-    if (globalObject) {
-        bool forceGlobalResolve = false;
-        if (m_regeneratingForExceptionInfo) {
-#if ENABLE(JIT)
-            forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInfoAtBytecodeOffset(instructions().size());
-#else
-            forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInstructionAtBytecodeOffset(instructions().size());
-#endif
-        }
-
-        if (index != missingSymbolMarker() && !forceGlobalResolve) {
-            // Directly index the property lookup across multiple scopes.
-            return emitGetScopedVar(dst, depth, index, globalObject);
-        }
+    if (m_localScopeDepth)
+        return Dynamic;
+    if (m_symbolTable && m_symbolTable->usesNonStrictEval())
+        return GlobalPropertyWithVarInjectionChecks;
+    return GlobalProperty;
+}
 
-#if ENABLE(JIT)
-        m_codeBlock->addGlobalResolveInfo(instructions().size());
-#else
-        m_codeBlock->addGlobalResolveInstruction(instructions().size());
-#endif
-        emitOpcode(op_resolve_global);
-        instructions().append(dst->index());
-        instructions().append(globalObject);
-        instructions().append(addConstant(property));
-        instructions().append(0);
-        instructions().append(0);
-        return dst;
-    }
+RegisterID* BytecodeGenerator::emitResolveScope(RegisterID* dst, const Identifier& identifier)
+{
+    m_codeBlock->addPropertyAccessInstruction(instructions().size());
 
-    if (index != missingSymbolMarker()) {
-        // Directly index the property lookup across multiple scopes.
-        return emitGetScopedVar(dst, depth, index, globalObject);
-    }
+    ASSERT(!m_symbolTable || !m_symbolTable->contains(identifier.impl()) || resolveType() == Dynamic);
 
-    // In this case we are at least able to drop a few scope chains from the
-    // lookup chain, although we still need to hash from then on.
-    emitOpcode(op_resolve_skip);
-    instructions().append(dst->index());
-    instructions().append(addConstant(property));
-    instructions().append(depth);
+    // resolve_scope dst, id, ResolveType, depth
+    emitOpcode(op_resolve_scope);
+    instructions().append(kill(dst));
+    instructions().append(addConstant(identifier));
+    instructions().append(resolveType());
+    instructions().append(0);
+    instructions().append(0);
     return dst;
 }
 
-RegisterID* BytecodeGenerator::emitGetScopedVar(RegisterID* dst, size_t depth, int index, JSValue globalObject)
+RegisterID* BytecodeGenerator::emitGetFromScope(RegisterID* dst, RegisterID* scope, const Identifier& identifier, ResolveMode resolveMode)
 {
-    if (globalObject) {
-        emitOpcode(op_get_global_var);
-        instructions().append(dst->index());
-        instructions().append(asCell(globalObject));
-        instructions().append(index);
-        return dst;
-    }
+    m_codeBlock->addPropertyAccessInstruction(instructions().size());
 
-    emitOpcode(op_get_scoped_var);
-    instructions().append(dst->index());
-    instructions().append(index);
-    instructions().append(depth);
+    // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
+    UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_scope);
+    instructions().append(kill(dst));
+    instructions().append(scope->index());
+    instructions().append(addConstant(identifier));
+    instructions().append(ResolveModeAndType(resolveMode, resolveType()).operand());
+    instructions().append(0);
+    instructions().append(0);
+    instructions().append(profile);
     return dst;
 }
 
-RegisterID* BytecodeGenerator::emitPutScopedVar(size_t depth, int index, RegisterID* value, JSValue globalObject)
+RegisterID* BytecodeGenerator::emitPutToScope(RegisterID* scope, const Identifier& identifier, RegisterID* value, ResolveMode resolveMode)
 {
-    if (globalObject) {
-        emitOpcode(op_put_global_var);
-        instructions().append(asCell(globalObject));
-        instructions().append(index);
-        instructions().append(value->index());
-        return value;
-    }
-    emitOpcode(op_put_scoped_var);
-    instructions().append(index);
-    instructions().append(depth);
+    m_codeBlock->addPropertyAccessInstruction(instructions().size());
+
+    // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+    emitOpcode(op_put_to_scope);
+    instructions().append(scope->index());
+    instructions().append(addConstant(identifier));
     instructions().append(value->index());
+    instructions().append(ResolveModeAndType(resolveMode, resolveType()).operand());
+    instructions().append(0);
+    instructions().append(0);
     return value;
 }
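// For reference: a minimal sketch of how the three emitters above are typically paired
// when compiling an identifier access. The caller shape and the ThrowIfNotFound mode are
// assumptions; only the signatures above come from this file.
//
//     // reading `x`:
//     RefPtr<RegisterID> scope = generator.newTemporary();
//     generator.emitResolveScope(scope.get(), ident);                             // op_resolve_scope
//     generator.emitGetFromScope(dst, scope.get(), ident, ThrowIfNotFound);       // op_get_from_scope
//
//     // writing `x = value`:
//     generator.emitResolveScope(scope.get(), ident);
//     generator.emitPutToScope(scope.get(), ident, value, ThrowIfNotFound);       // op_put_to_scope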
 
-RegisterID* BytecodeGenerator::emitResolveBase(RegisterID* dst, const Identifier& property)
-{
-    size_t depth = 0;
-    int index = 0;
-    JSObject* globalObject = 0;
-    findScopedProperty(property, index, depth, false, globalObject);
-    if (!globalObject) {
-        // We can't optimise at all :-(
-        emitOpcode(op_resolve_base);
-        instructions().append(dst->index());
-        instructions().append(addConstant(property));
-        return dst;
-    }
-
-    // Global object is the base
-    return emitLoad(dst, JSValue(globalObject));
+RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype)
+{ 
+    emitOpcode(op_instanceof);
+    instructions().append(dst->index());
+    instructions().append(value->index());
+    instructions().append(basePrototype->index());
+    return dst;
 }
 
-RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
+RegisterID* BytecodeGenerator::emitInitGlobalConst(const Identifier& identifier, RegisterID* value)
 {
-    size_t depth = 0;
-    int index = 0;
-    JSObject* globalObject = 0;
-    if (!findScopedProperty(property, index, depth, false, globalObject) || !globalObject) {
-        // We can't optimise at all :-(
-        emitOpcode(op_resolve_with_base);
-        instructions().append(baseDst->index());
-        instructions().append(propDst->index());
-        instructions().append(addConstant(property));
-        return baseDst;
-    }
-
-    bool forceGlobalResolve = false;
-    if (m_regeneratingForExceptionInfo) {
-#if ENABLE(JIT)
-        forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInfoAtBytecodeOffset(instructions().size());
-#else
-        forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInstructionAtBytecodeOffset(instructions().size());
-#endif
-    }
-
-    // Global object is the base
-    emitLoad(baseDst, JSValue(globalObject));
+    ASSERT(m_codeType == GlobalCode);
+    emitOpcode(op_init_global_const_nop);
+    instructions().append(0);
+    instructions().append(value->index());
+    instructions().append(0);
+    instructions().append(addConstant(identifier));
+    return value;
+}
 
-    if (index != missingSymbolMarker() && !forceGlobalResolve) {
-        // Directly index the property lookup across multiple scopes.
-        emitGetScopedVar(propDst, depth, index, globalObject);
-        return baseDst;
-    }
+RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
+{
+    m_codeBlock->addPropertyAccessInstruction(instructions().size());
 
-#if ENABLE(JIT)
-    m_codeBlock->addGlobalResolveInfo(instructions().size());
-#else
-    m_codeBlock->addGlobalResolveInstruction(instructions().size());
-#endif
-    emitOpcode(op_resolve_global);
-    instructions().append(propDst->index());
-    instructions().append(globalObject);
+    UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id);
+    instructions().append(kill(dst));
+    instructions().append(base->index());
     instructions().append(addConstant(property));
     instructions().append(0);
     instructions().append(0);
-    return baseDst;
+    instructions().append(0);
+    instructions().append(0);
+    instructions().append(profile);
+    return dst;
 }
 
-void BytecodeGenerator::emitMethodCheck()
+RegisterID* BytecodeGenerator::emitGetArgumentsLength(RegisterID* dst, RegisterID* base)
 {
-    emitOpcode(op_method_check);
+    emitOpcode(op_get_arguments_length);
+    instructions().append(dst->index());
+    ASSERT(base->virtualRegister() == m_codeBlock->argumentsRegister());
+    instructions().append(base->index());
+    instructions().append(addConstant(propertyNames().length));
+    return dst;
 }
 
-RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
+RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
 {
-#if ENABLE(JIT)
-    m_codeBlock->addStructureStubInfo(StructureStubInfo(access_get_by_id));
-#else
+    unsigned propertyIndex = addConstant(property);
+
+    m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
     m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
 
-    emitOpcode(op_get_by_id);
-    instructions().append(dst->index());
+    emitOpcode(op_put_by_id);
     instructions().append(base->index());
-    instructions().append(addConstant(property));
+    instructions().append(propertyIndex);
+    instructions().append(value->index());
     instructions().append(0);
     instructions().append(0);
     instructions().append(0);
     instructions().append(0);
-    return dst;
+    instructions().append(0);
+    return value;
 }
 
-RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
+RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value)
 {
-#if ENABLE(JIT)
-    m_codeBlock->addStructureStubInfo(StructureStubInfo(access_put_by_id));
-#else
-    m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
+    unsigned propertyIndex = addConstant(property);
 
+    m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
+    m_codeBlock->addPropertyAccessInstruction(instructions().size());
+    
     emitOpcode(op_put_by_id);
     instructions().append(base->index());
-    instructions().append(addConstant(property));
+    instructions().append(propertyIndex);
     instructions().append(value->index());
     instructions().append(0);
     instructions().append(0);
     instructions().append(0);
     instructions().append(0);
+    instructions().append(
+        property != m_vm->propertyNames->underscoreProto
+        && PropertyName(property).asIndex() == PropertyName::NotAnIndex);
     return value;
 }
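// For reference: a hedged sketch of the caller-side difference between the two puts
// (caller code is assumed, not taken from this file). emitPutById is the ordinary
// `o.x = v` path; emitDirectPutById defines an own property without consulting the
// prototype chain (typically object-literal properties), and its extra trailing operand
// records whether the name is neither "__proto__" nor an array index.
//
//     generator.emitPutById(base, propertyName, value);        // o.x = v
//     generator.emitDirectPutById(base, propertyName, value);  // { x: v }-style definition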
 
-RegisterID* BytecodeGenerator::emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value)
+void BytecodeGenerator::emitPutGetterSetter(RegisterID* base, const Identifier& property, RegisterID* getter, RegisterID* setter)
 {
-    emitOpcode(op_put_getter);
-    instructions().append(base->index());
-    instructions().append(addConstant(property));
-    instructions().append(value->index());
-    return value;
-}
+    unsigned propertyIndex = addConstant(property);
 
-RegisterID* BytecodeGenerator::emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value)
-{
-    emitOpcode(op_put_setter);
+    m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
+    emitOpcode(op_put_getter_setter);
     instructions().append(base->index());
-    instructions().append(addConstant(property));
-    instructions().append(value->index());
-    return value;
+    instructions().append(propertyIndex);
+    instructions().append(getter->index());
+    instructions().append(setter->index());
 }
 
 RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier& property)
@@ -1277,6 +1375,19 @@ RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base,
     return dst;
 }
 
+RegisterID* BytecodeGenerator::emitGetArgumentByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
+{
+    UnlinkedArrayProfile arrayProfile = newArrayProfile();
+    UnlinkedValueProfile profile = emitProfiledOpcode(op_get_argument_by_val);
+    instructions().append(kill(dst));
+    ASSERT(base->virtualRegister() == m_codeBlock->argumentsRegister());
+    instructions().append(base->index());
+    instructions().append(property->index());
+    instructions().append(arrayProfile);
+    instructions().append(profile);
+    return dst;
+}
+
 RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
 {
     for (size_t i = m_forInContextStack.size(); i > 0; i--) {
@@ -1292,19 +1403,38 @@ RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, R
             return dst;
         }
     }
-    emitOpcode(op_get_by_val);
-    instructions().append(dst->index());
+    UnlinkedArrayProfile arrayProfile = newArrayProfile();
+    UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val);
+    instructions().append(kill(dst));
     instructions().append(base->index());
     instructions().append(property->index());
+    instructions().append(arrayProfile);
+    instructions().append(profile);
     return dst;
 }
 
 RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
 {
-    emitOpcode(op_put_by_val);
+    UnlinkedArrayProfile arrayProfile = newArrayProfile();
+    if (m_isBuiltinFunction)
+        emitOpcode(op_put_by_val_direct);
+    else
+        emitOpcode(op_put_by_val);
+    instructions().append(base->index());
+    instructions().append(property->index());
+    instructions().append(value->index());
+    instructions().append(arrayProfile);
+    return value;
+}
+
+RegisterID* BytecodeGenerator::emitDirectPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
+{
+    UnlinkedArrayProfile arrayProfile = newArrayProfile();
+    emitOpcode(op_put_by_val_direct);
     instructions().append(base->index());
     instructions().append(property->index());
     instructions().append(value->index());
+    instructions().append(arrayProfile);
     return value;
 }
 
@@ -1326,38 +1456,132 @@ RegisterID* BytecodeGenerator::emitPutByIndex(RegisterID* base, unsigned index,
     return value;
 }
 
+RegisterID* BytecodeGenerator::emitCreateThis(RegisterID* dst)
+{
+    RefPtr<RegisterID> func = newTemporary(); 
+
+    m_codeBlock->addPropertyAccessInstruction(instructions().size());
+    emitOpcode(op_get_callee);
+    instructions().append(func->index());
+    instructions().append(0);
+
+    size_t begin = instructions().size();
+    m_staticPropertyAnalyzer.createThis(m_thisRegister.index(), begin + 3);
+
+    emitOpcode(op_create_this); 
+    instructions().append(m_thisRegister.index()); 
+    instructions().append(func->index()); 
+    instructions().append(0);
+    return dst;
+}
+
 RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst)
 {
+    size_t begin = instructions().size();
+    m_staticPropertyAnalyzer.newObject(dst->index(), begin + 2);
+
     emitOpcode(op_new_object);
     instructions().append(dst->index());
+    instructions().append(0);
+    instructions().append(newObjectAllocationProfile());
     return dst;
 }
 
-RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elements)
+unsigned BytecodeGenerator::addConstantBuffer(unsigned length)
+{
+    return m_codeBlock->addConstantBuffer(length);
+}
+
+JSString* BytecodeGenerator::addStringConstant(const Identifier& identifier)
+{
+    JSString*& stringInMap = m_stringMap.add(identifier.impl(), nullptr).iterator->value;
+    if (!stringInMap) {
+        stringInMap = jsString(vm(), identifier.string());
+        addConstantValue(stringInMap);
+    }
+    return stringInMap;
+}
+
+RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elements, unsigned length)
 {
-    Vector<RefPtr<RegisterID>, 16> argv;
+#if !ASSERT_DISABLED
+    unsigned checkLength = 0;
+#endif
+    bool hadVariableExpression = false;
+    if (length) {
+        for (ElementNode* n = elements; n; n = n->next()) {
+            if (!n->value()->isConstant()) {
+                hadVariableExpression = true;
+                break;
+            }
+            if (n->elision())
+                break;
+#if !ASSERT_DISABLED
+            checkLength++;
+#endif
+        }
+        if (!hadVariableExpression) {
+            ASSERT(length == checkLength);
+            unsigned constantBufferIndex = addConstantBuffer(length);
+            JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex).data();
+            unsigned index = 0;
+            for (ElementNode* n = elements; index < length; n = n->next()) {
+                ASSERT(n->value()->isConstant());
+                constantBuffer[index++] = static_cast<ConstantNode*>(n->value())->jsValue(*this);
+            }
+            emitOpcode(op_new_array_buffer);
+            instructions().append(dst->index());
+            instructions().append(constantBufferIndex);
+            instructions().append(length);
+            instructions().append(newArrayAllocationProfile());
+            return dst;
+        }
+    }
+
+    Vector<RefPtr<RegisterID>, 16, UnsafeVectorOverflow> argv;
     for (ElementNode* n = elements; n; n = n->next()) {
-        if (n->elision())
+        if (!length)
             break;
+        length--;
+        ASSERT(!n->value()->isSpreadExpression());
         argv.append(newTemporary());
         // op_new_array requires the initial values to be a sequential range of registers
-        ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
+        ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() - 1);
         emitNode(argv.last().get(), n->value());
     }
+    ASSERT(!length);
     emitOpcode(op_new_array);
     instructions().append(dst->index());
     instructions().append(argv.size() ? argv[0]->index() : 0); // argv
     instructions().append(argv.size()); // argc
+    instructions().append(newArrayAllocationProfile());
     return dst;
 }
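// For reference, the two paths above on assumed JS inputs:
//
//     [1, 2, 3]   // every element up to `length` is a constant and there is no elision:
//                 // the values are copied into a constant buffer and one
//                 // op_new_array_buffer is emitted.
//     [1, x, 3]   // a non-constant element is found: the elements are materialized into
//                 // consecutive temporaries and op_new_array consumes them as a
//                 // sequential register range.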
 
-RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FunctionBodyNode* function)
+RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, CaptureMode captureMode, FunctionBodyNode* function)
+{
+    return emitNewFunctionInternal(dst, captureMode, m_codeBlock->addFunctionDecl(makeFunction(function)), false);
+}
+
+RegisterID* BytecodeGenerator::emitLazyNewFunction(RegisterID* dst, FunctionBodyNode* function)
 {
-    unsigned index = m_codeBlock->addFunctionDecl(makeFunction(m_globalData, function));
+    FunctionOffsetMap::AddResult ptr = m_functionOffsets.add(function, 0);
+    if (ptr.isNewEntry)
+        ptr.iterator->value = m_codeBlock->addFunctionDecl(makeFunction(function));
+    return emitNewFunctionInternal(dst, NotCaptured, ptr.iterator->value, true);
+}
 
-    emitOpcode(op_new_func);
+RegisterID* BytecodeGenerator::emitNewFunctionInternal(RegisterID* dst, CaptureMode captureMode, unsigned index, bool doNullCheck)
+{
+    createActivationIfNecessary();
+    emitOpcode(captureMode == IsCaptured ? op_new_captured_func : op_new_func);
     instructions().append(dst->index());
     instructions().append(index);
+    if (captureMode == IsCaptured) {
+        ASSERT(!doNullCheck);
+        instructions().append(watchableVariable(dst->index()));
+    } else
+        instructions().append(doNullCheck);
     return dst;
 }
 
@@ -1369,151 +1593,256 @@ RegisterID* BytecodeGenerator::emitNewRegExp(RegisterID* dst, RegExp* regExp)
     return dst;
 }
 
-
 RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* r0, FuncExprNode* n)
 {
     FunctionBodyNode* function = n->body();
-    unsigned index = m_codeBlock->addFunctionExpr(makeFunction(m_globalData, function));
-
+    unsigned index = m_codeBlock->addFunctionExpr(makeFunction(function));
+    
+    createActivationIfNecessary();
     emitOpcode(op_new_func_exp);
     instructions().append(r0->index());
     instructions().append(index);
     return r0;
 }
 
-RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
 {
-    return emitCall(op_call, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
+    return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd);
 }
 
 void BytecodeGenerator::createArgumentsIfNecessary()
 {
-    if (m_codeBlock->usesArguments() && m_codeType == FunctionCode)
-        emitOpcode(op_create_arguments);
+    if (m_codeType != FunctionCode)
+        return;
+    
+    if (!m_codeBlock->usesArguments())
+        return;
+
+    if (shouldTearOffArgumentsEagerly())
+        return;
+
+    emitOpcode(op_create_arguments);
+    instructions().append(m_codeBlock->argumentsRegister().offset());
+    ASSERT(!hasWatchableVariable(m_codeBlock->argumentsRegister().offset()));
 }
 
-RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+void BytecodeGenerator::createActivationIfNecessary()
 {
-    createArgumentsIfNecessary();
-    return emitCall(op_call_eval, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
+    if (!m_activationRegister)
+        return;
+    emitOpcode(op_create_activation);
+    instructions().append(m_activationRegister->index());
 }
 
-RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
 {
-    ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
-    ASSERT(func->refCount());
-    ASSERT(thisRegister->refCount());
+    createActivationIfNecessary();
+    return emitCall(op_call_eval, dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd);
+}
 
-    RegisterID* originalFunc = func;
-    if (m_shouldEmitProfileHooks) {
-        // If codegen decided to recycle func as this call's destination register,
-        // we need to undo that optimization here so that func will still be around
-        // for the sake of op_profile_did_call.
-        if (dst == func) {
-            RefPtr<RegisterID> movedThisRegister = emitMove(newTemporary(), thisRegister);
-            RefPtr<RegisterID> movedFunc = emitMove(thisRegister, func);
-            
-            thisRegister = movedThisRegister.release().releaseRef();
-            func = movedFunc.release().releaseRef();
+ExpectedFunction BytecodeGenerator::expectedFunctionForIdentifier(const Identifier& identifier)
+{
+    if (identifier == m_vm->propertyNames->Object)
+        return ExpectObjectConstructor;
+    if (identifier == m_vm->propertyNames->Array)
+        return ExpectArrayConstructor;
+    return NoExpectedFunction;
+}
+
+ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, Label* done)
+{
+    RefPtr<Label> realCall = newLabel();
+    switch (expectedFunction) {
+    case ExpectObjectConstructor: {
+        // If the number of arguments is non-zero, then we can't do anything interesting.
+        if (callArguments.argumentCountIncludingThis() >= 2)
+            return NoExpectedFunction;
+        
+        size_t begin = instructions().size();
+        emitOpcode(op_jneq_ptr);
+        instructions().append(func->index());
+        instructions().append(Special::ObjectConstructor);
+        instructions().append(realCall->bind(begin, instructions().size()));
+        
+        if (dst != ignoredResult())
+            emitNewObject(dst);
+        break;
+    }
+        
+    case ExpectArrayConstructor: {
+        // If you're doing anything other than "new Array()" or "new Array(foo)" then we
+        // don't inline it, for now. The only reason is that call arguments are in
+        // the opposite order of what op_new_array expects, so we'd either need to change
+        // how op_new_array works or we'd need an op_new_array_reverse. Neither of these
+        // things sounds like it's worth it.
+        if (callArguments.argumentCountIncludingThis() > 2)
+            return NoExpectedFunction;
+        
+        size_t begin = instructions().size();
+        emitOpcode(op_jneq_ptr);
+        instructions().append(func->index());
+        instructions().append(Special::ArrayConstructor);
+        instructions().append(realCall->bind(begin, instructions().size()));
+        
+        if (dst != ignoredResult()) {
+            if (callArguments.argumentCountIncludingThis() == 2) {
+                emitOpcode(op_new_array_with_size);
+                instructions().append(dst->index());
+                instructions().append(callArguments.argumentRegister(0)->index());
+                instructions().append(newArrayAllocationProfile());
+            } else {
+                ASSERT(callArguments.argumentCountIncludingThis() == 1);
+                emitOpcode(op_new_array);
+                instructions().append(dst->index());
+                instructions().append(0);
+                instructions().append(0);
+                instructions().append(newArrayAllocationProfile());
+            }
         }
+        break;
+    }
+        
+    default:
+        ASSERT(expectedFunction == NoExpectedFunction);
+        return NoExpectedFunction;
     }
+    
+    size_t begin = instructions().size();
+    emitOpcode(op_jmp);
+    instructions().append(done->bind(begin, instructions().size()));
+    emitLabel(realCall.get());
+    
+    return expectedFunction;
+}
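// For reference: a rough picture of the guarded fast path this emits (operand names
// abbreviated; the label names and the trailing call, which is emitted by
// emitCall/emitConstruct rather than here, are assumptions). For `new Array(n)` where
// `Array` is expected to still be the builtin constructor:
//
//         jneq_ptr  func, Special::ArrayConstructor, realCall
//         new_array_with_size  dst, n
//         jmp  done
//     realCall:
//         ... the ordinary op_construct sequence ...
//     done: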
+
+RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+    ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
+    ASSERT(func->refCount());
+
+    if (m_shouldEmitProfileHooks)
+        emitMove(callArguments.profileHookRegister(), func);
 
     // Generate code for arguments.
-    Vector<RefPtr<RegisterID>, 16> argv;
-    argv.append(thisRegister);
-    for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next) {
-        argv.append(newTemporary());
-        // op_call requires the arguments to be a sequential range of registers
-        ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
-        emitNode(argv.last().get(), n);
+    unsigned argument = 0;
+    if (callArguments.argumentsNode()) {
+        ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
+        if (n && n->m_expr->isSpreadExpression()) {
+            RELEASE_ASSERT(!n->m_next);
+            auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
+            RefPtr<RegisterID> argumentRegister;
+            if (expression->isResolveNode() && willResolveToArguments(static_cast<ResolveNode*>(expression)->identifier()) && !symbolTable().slowArguments())
+                argumentRegister = uncheckedRegisterForArguments();
+            else
+                argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
+            RefPtr<RegisterID> thisRegister = emitMove(newTemporary(), callArguments.thisRegister());
+            return emitCallVarargs(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+        }
+        for (; n; n = n->m_next)
+            emitNode(callArguments.argumentRegister(argument++), n);
     }
-
+    
     // Reserve space for call frame.
-    Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
-    for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+    Vector<RefPtr<RegisterID>, JSStack::CallFrameHeaderSize, UnsafeVectorOverflow> callFrame;
+    for (int i = 0; i < JSStack::CallFrameHeaderSize; ++i)
         callFrame.append(newTemporary());
 
     if (m_shouldEmitProfileHooks) {
         emitOpcode(op_profile_will_call);
-        instructions().append(func->index());
-
-#if ENABLE(JIT)
-        m_codeBlock->addFunctionRegisterInfo(instructions().size(), func->index());
-#endif
+        instructions().append(callArguments.profileHookRegister()->index());
     }
 
-    emitExpressionInfo(divot, startOffset, endOffset);
-
-#if ENABLE(JIT)
-    m_codeBlock->addCallLinkInfo();
-#endif
+    emitExpressionInfo(divot, divotStart, divotEnd);
 
+    RefPtr<Label> done = newLabel();
+    expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get());
+    
     // Emit call.
-    emitOpcode(opcodeID);
-    instructions().append(dst->index()); // dst
-    instructions().append(func->index()); // func
-    instructions().append(argv.size()); // argCount
-    instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
+    UnlinkedArrayProfile arrayProfile = newArrayProfile();
+    UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID);
+    ASSERT(dst);
+    ASSERT(dst != ignoredResult());
+    instructions().append(dst->index());
+    instructions().append(func->index());
+    instructions().append(callArguments.argumentCountIncludingThis());
+    instructions().append(callArguments.stackOffset());
+    instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+    instructions().append(0);
+    instructions().append(arrayProfile);
+    instructions().append(profile);
+    
+    if (expectedFunction != NoExpectedFunction)
+        emitLabel(done.get());
 
     if (m_shouldEmitProfileHooks) {
         emitOpcode(op_profile_did_call);
-        instructions().append(func->index());
-
-        if (dst == originalFunc) {
-            thisRegister->deref();
-            func->deref();
-        }
+        instructions().append(callArguments.profileHookRegister()->index());
     }
 
     return dst;
 }
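// For reference (assumed JS inputs): the spread special case above means a forwarding
// call such as `g(...arguments)` does not materialize an argument list; when `arguments`
// has not been given slow/aliased storage, the arguments register itself is handed to
// emitCallVarargs (op_call_varargs). Any other `g(...expr)` first evaluates `expr` into
// a register and passes that register to op_call_varargs instead.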
 
-RegisterID* BytecodeGenerator::emitLoadVarargs(RegisterID* argCountDst, RegisterID* arguments)
+RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
 {
-    ASSERT(argCountDst->index() < arguments->index());
-    emitOpcode(op_load_varargs);
-    instructions().append(argCountDst->index());
-    instructions().append(arguments->index());
-    return argCountDst;
+    return emitCallVarargs(op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
 }
 
-RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* argCountRegister, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+    return emitCallVarargs(op_construct_varargs, dst, func, 0, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
+}
+    
+RegisterID* BytecodeGenerator::emitCallVarargs(OpcodeID opcode, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
 {
-    ASSERT(func->refCount());
-    ASSERT(thisRegister->refCount());
-    ASSERT(dst != func);
     if (m_shouldEmitProfileHooks) {
+        emitMove(profileHookRegister, func);
         emitOpcode(op_profile_will_call);
-        instructions().append(func->index());
-        
-#if ENABLE(JIT)
-        m_codeBlock->addFunctionRegisterInfo(instructions().size(), func->index());
-#endif
+        instructions().append(profileHookRegister->index());
     }
     
-    emitExpressionInfo(divot, startOffset, endOffset);
-    
+    emitExpressionInfo(divot, divotStart, divotEnd);
+
     // Emit call.
-    emitOpcode(op_call_varargs);
-    instructions().append(dst->index()); // dst
-    instructions().append(func->index()); // func
-    instructions().append(argCountRegister->index()); // arg count
-    instructions().append(thisRegister->index() + RegisterFile::CallFrameHeaderSize); // initial registerOffset
+    UnlinkedArrayProfile arrayProfile = newArrayProfile();
+    UnlinkedValueProfile profile = emitProfiledOpcode(opcode);
+    ASSERT(dst != ignoredResult());
+    instructions().append(dst->index());
+    instructions().append(func->index());
+    instructions().append(thisRegister ? thisRegister->index() : 0);
+    instructions().append(arguments->index());
+    instructions().append(firstFreeRegister->index());
+    instructions().append(firstVarArgOffset);
+    instructions().append(arrayProfile);
+    instructions().append(profile);
     if (m_shouldEmitProfileHooks) {
         emitOpcode(op_profile_did_call);
-        instructions().append(func->index());
+        instructions().append(profileHookRegister->index());
     }
     return dst;
 }
 
 RegisterID* BytecodeGenerator::emitReturn(RegisterID* src)
 {
-    if (m_codeBlock->needsFullScopeChain()) {
+    if (m_activationRegister) {
         emitOpcode(op_tear_off_activation);
-        instructions().append(m_activationRegisterIndex);
-    } else if (m_codeBlock->usesArguments() && m_codeBlock->m_numParameters > 1)
+        instructions().append(m_activationRegister->index());
+    }
+
+    if (m_codeBlock->usesArguments() && m_codeBlock->numParameters() != 1 && !isStrictMode()) {
         emitOpcode(op_tear_off_arguments);
+        instructions().append(m_codeBlock->argumentsRegister().offset());
+        instructions().append(m_activationRegister ? m_activationRegister->index() : emitLoad(0, JSValue())->index());
+    }
 
+    // Constructors use op_ret_object_or_this to check the result is an
+    // object, unless we can trivially determine the check is not
+    // necessary (currently, if the return value is 'this').
+    if (isConstructor() && (src->index() != m_thisRegister.index())) {
+        emitOpcode(op_ret_object_or_this);
+        instructions().append(src->index());
+        instructions().append(m_thisRegister.index());
+        return src;
+    }
     return emitUnaryNoDstOp(op_ret, src);
 }
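// For reference, the JS semantics the op_ret_object_or_this check implements:
//
//     function C() { this.x = 1; return 42; }   // a primitive return value is ignored;
//     new C();                                   // yields the newly constructed object
//
//     function D() { return { y: 2 }; }          // an object return value wins;
//     new D();                                   // yields { y: 2 }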
 
@@ -1524,72 +1853,65 @@ RegisterID* BytecodeGenerator::emitUnaryNoDstOp(OpcodeID opcodeID, RegisterID* s
     return src;
 }
 
-RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
 {
     ASSERT(func->refCount());
 
-    RegisterID* originalFunc = func;
-    if (m_shouldEmitProfileHooks) {
-        // If codegen decided to recycle func as this call's destination register,
-        // we need to undo that optimization here so that func will still be around
-        // for the sake of op_profile_did_call.
-        if (dst == func) {
-            RefPtr<RegisterID> movedFunc = emitMove(newTemporary(), func);
-            func = movedFunc.release().releaseRef();
-        }
-    }
-
-    RefPtr<RegisterID> funcProto = newTemporary();
+    if (m_shouldEmitProfileHooks)
+        emitMove(callArguments.profileHookRegister(), func);
 
     // Generate code for arguments.
-    Vector<RefPtr<RegisterID>, 16> argv;
-    argv.append(newTemporary()); // reserve space for "this"
-    for (ArgumentListNode* n = argumentsNode ? argumentsNode->m_listNode : 0; n; n = n->m_next) {
-        argv.append(newTemporary());
-        // op_construct requires the arguments to be a sequential range of registers
-        ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
-        emitNode(argv.last().get(), n);
+    unsigned argument = 0;
+    if (ArgumentsNode* argumentsNode = callArguments.argumentsNode()) {
+        
+        ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
+        if (n && n->m_expr->isSpreadExpression()) {
+            RELEASE_ASSERT(!n->m_next);
+            auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
+            RefPtr<RegisterID> argumentRegister;
+            if (expression->isResolveNode() && willResolveToArguments(static_cast<ResolveNode*>(expression)->identifier()) && !symbolTable().slowArguments())
+                argumentRegister = uncheckedRegisterForArguments();
+            else
+                argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
+            return emitConstructVarargs(dst, func, argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+        }
+        
+        for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next)
+            emitNode(callArguments.argumentRegister(argument++), n);
     }
 
     if (m_shouldEmitProfileHooks) {
         emitOpcode(op_profile_will_call);
-        instructions().append(func->index());
+        instructions().append(callArguments.profileHookRegister()->index());
     }
 
-    // Load prototype.
-    emitExpressionInfo(divot, startOffset, endOffset);
-    emitGetByIdExceptionInfo(op_construct);
-    emitGetById(funcProto.get(), func, globalData()->propertyNames->prototype);
-
     // Reserve space for call frame.
-    Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
-    for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+    Vector<RefPtr<RegisterID>, JSStack::CallFrameHeaderSize, UnsafeVectorOverflow> callFrame;
+    for (int i = 0; i < JSStack::CallFrameHeaderSize; ++i)
         callFrame.append(newTemporary());
 
-    emitExpressionInfo(divot, startOffset, endOffset);
-
-#if ENABLE(JIT)
-    m_codeBlock->addCallLinkInfo();
-#endif
-
-    emitOpcode(op_construct);
-    instructions().append(dst->index()); // dst
-    instructions().append(func->index()); // func
-    instructions().append(argv.size()); // argCount
-    instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
-    instructions().append(funcProto->index()); // proto
-    instructions().append(argv[0]->index()); // thisRegister
+    emitExpressionInfo(divot, divotStart, divotEnd);
+    
+    RefPtr<Label> done = newLabel();
+    expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get());
 
-    emitOpcode(op_construct_verify);
+    UnlinkedValueProfile profile = emitProfiledOpcode(op_construct);
+    ASSERT(dst != ignoredResult());
     instructions().append(dst->index());
-    instructions().append(argv[0]->index());
+    instructions().append(func->index());
+    instructions().append(callArguments.argumentCountIncludingThis());
+    instructions().append(callArguments.stackOffset());
+    instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+    instructions().append(0);
+    instructions().append(0);
+    instructions().append(profile);
+
+    if (expectedFunction != NoExpectedFunction)
+        emitLabel(done.get());
 
     if (m_shouldEmitProfileHooks) {
         emitOpcode(op_profile_did_call);
-        instructions().append(func->index());
-        
-        if (dst == originalFunc)
-            func->deref();
+        instructions().append(callArguments.profileHookRegister()->index());
     }
 
     return dst;
@@ -1612,16 +1934,15 @@ void BytecodeGenerator::emitToPrimitive(RegisterID* dst, RegisterID* src)
     instructions().append(src->index());
 }
 
-RegisterID* BytecodeGenerator::emitPushScope(RegisterID* scope)
+RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* scope)
 {
-    ASSERT(scope->isTemporary());
     ControlFlowContext context;
     context.isFinallyBlock = false;
     m_scopeContextStack.append(context);
-    m_dynamicScopeDepth++;
-    createArgumentsIfNecessary();
+    m_localScopeDepth++;
 
-    return emitUnaryNoDstOp(op_push_scope, scope);
+    createActivationIfNecessary();
+    return emitUnaryNoDstOp(op_push_with_scope, scope);
 }
 
 void BytecodeGenerator::emitPopScope()
@@ -1632,24 +1953,43 @@ void BytecodeGenerator::emitPopScope()
     emitOpcode(op_pop_scope);
 
     m_scopeContextStack.removeLast();
-    m_dynamicScopeDepth--;
+    m_localScopeDepth--;
 }
 
-void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, int firstLine, int lastLine)
+void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, unsigned line, unsigned charOffset, unsigned lineStart)
 {
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+    if (debugHookID != DidReachBreakpoint)
+        return;
+#else
     if (!m_shouldEmitDebugHooks)
         return;
+#endif
+    JSTextPosition divot(line, charOffset, lineStart);
+    emitExpressionInfo(divot, divot, divot);
     emitOpcode(op_debug);
     instructions().append(debugHookID);
-    instructions().append(firstLine);
-    instructions().append(lastLine);
+    instructions().append(false);
 }
 
-void BytecodeGenerator::pushFinallyContext(Label* target, RegisterID* retAddrDst)
+void BytecodeGenerator::pushFinallyContext(StatementNode* finallyBlock)
 {
+    // Reclaim free label scopes.
+    while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+        m_labelScopes.removeLast();
+
     ControlFlowContext scope;
     scope.isFinallyBlock = true;
-    FinallyContext context = { target, retAddrDst };
+    FinallyContext context = {
+        finallyBlock,
+        static_cast<unsigned>(m_scopeContextStack.size()),
+        static_cast<unsigned>(m_switchContextStack.size()),
+        static_cast<unsigned>(m_forInContextStack.size()),
+        static_cast<unsigned>(m_tryContextStack.size()),
+        static_cast<unsigned>(m_labelScopes.size()),
+        m_finallyDepth,
+        m_localScopeDepth
+    };
     scope.finallyContext = context;
     m_scopeContextStack.append(scope);
     m_finallyDepth++;
@@ -1664,7 +2004,7 @@ void BytecodeGenerator::popFinallyContext()
     m_finallyDepth--;
 }
 
-LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
+LabelScopePtr BytecodeGenerator::breakTarget(const Identifier& name)
 {
     // Reclaim free label scopes.
     //
@@ -1680,7 +2020,7 @@ LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
     }
 
     if (!m_labelScopes.size())
-        return 0;
+        return LabelScopePtr::null();
 
     // We special-case the following, which is a syntax error in Firefox:
     // label:
@@ -1690,58 +2030,58 @@ LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
             LabelScope* scope = &m_labelScopes[i];
             if (scope->type() != LabelScope::NamedLabel) {
                 ASSERT(scope->breakTarget());
-                return scope;
+                return LabelScopePtr(m_labelScopes, i);
             }
         }
-        return 0;
+        return LabelScopePtr::null();
     }
 
     for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
         LabelScope* scope = &m_labelScopes[i];
         if (scope->name() && *scope->name() == name) {
             ASSERT(scope->breakTarget());
-            return scope;
+            return LabelScopePtr(m_labelScopes, i);
         }
     }
-    return 0;
+    return LabelScopePtr::null();
 }
 
-LabelScope* BytecodeGenerator::continueTarget(const Identifier& name)
+LabelScopePtr BytecodeGenerator::continueTarget(const Identifier& name)
 {
     // Reclaim free label scopes.
     while (m_labelScopes.size() && !m_labelScopes.last().refCount())
         m_labelScopes.removeLast();
 
     if (!m_labelScopes.size())
-        return 0;
+        return LabelScopePtr::null();
 
     if (name.isEmpty()) {
         for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
             LabelScope* scope = &m_labelScopes[i];
             if (scope->type() == LabelScope::Loop) {
                 ASSERT(scope->continueTarget());
-                return scope;
+                return LabelScopePtr(m_labelScopes, i);
             }
         }
-        return 0;
+        return LabelScopePtr::null();
     }
 
     // Continue to the loop nested nearest to the label scope that matches
     // 'name'.
-    LabelScope* result = 0;
+    LabelScopePtr result = LabelScopePtr::null();
     for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
         LabelScope* scope = &m_labelScopes[i];
         if (scope->type() == LabelScope::Loop) {
             ASSERT(scope->continueTarget());
-            result = scope;
+            result = LabelScopePtr(m_labelScopes, i);
         }
         if (scope->name() && *scope->name() == name)
-            return result; // may be 0
+            return result; // may be null.
     }
-    return 0;
+    return LabelScopePtr::null();
 }
 
-PassRefPtr<Label> BytecodeGenerator::emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope)
+void BytecodeGenerator::emitComplexPopScopes(ControlFlowContext* topScope, ControlFlowContext* bottomScope)
 {
     while (topScope > bottomScope) {
         // First we count the number of dynamic scopes we need to remove to get
@@ -1755,54 +2095,121 @@ PassRefPtr<Label> BytecodeGenerator::emitComplexJumpScopes(Label* target, Contro
         }
 
         if (nNormalScopes) {
-            size_t begin = instructions().size();
-
             // We need to remove a number of dynamic scopes to get to the next
             // finally block
-            emitOpcode(op_jmp_scopes);
-            instructions().append(nNormalScopes);
-
-            // If topScope == bottomScope then there isn't actually a finally block
-            // left to emit, so make the jmp_scopes jump directly to the target label
-            if (topScope == bottomScope) {
-                instructions().append(target->bind(begin, instructions().size()));
-                return target;
-            }
+            while (nNormalScopes--)
+                emitOpcode(op_pop_scope);
 
-            // Otherwise we just use jmp_scopes to pop a group of scopes and go
-            // to the next instruction
-            RefPtr<Label> nextInsn = newLabel();
-            instructions().append(nextInsn->bind(begin, instructions().size()));
-            emitLabel(nextInsn.get());
+            // If topScope == bottomScope then there isn't a finally block left to emit.
+            if (topScope == bottomScope)
+                return;
         }
-
+        
+        Vector<ControlFlowContext> savedScopeContextStack;
+        Vector<SwitchInfo> savedSwitchContextStack;
+        Vector<ForInContext> savedForInContextStack;
+        Vector<TryContext> poppedTryContexts;
+        LabelScopeStore savedLabelScopes;
         while (topScope > bottomScope && topScope->isFinallyBlock) {
-            emitJumpSubroutine(topScope->finallyContext.retAddrDst, topScope->finallyContext.finallyAddr);
+            RefPtr<Label> beforeFinally = emitLabel(newLabel().get());
+            
+            // Save the current state of the world while instating the state of the world
+            // for the finally block.
+            FinallyContext finallyContext = topScope->finallyContext;
+            bool flipScopes = finallyContext.scopeContextStackSize != m_scopeContextStack.size();
+            bool flipSwitches = finallyContext.switchContextStackSize != m_switchContextStack.size();
+            bool flipForIns = finallyContext.forInContextStackSize != m_forInContextStack.size();
+            bool flipTries = finallyContext.tryContextStackSize != m_tryContextStack.size();
+            bool flipLabelScopes = finallyContext.labelScopesSize != m_labelScopes.size();
+            int topScopeIndex = -1;
+            int bottomScopeIndex = -1;
+            if (flipScopes) {
+                topScopeIndex = topScope - m_scopeContextStack.begin();
+                bottomScopeIndex = bottomScope - m_scopeContextStack.begin();
+                savedScopeContextStack = m_scopeContextStack;
+                m_scopeContextStack.shrink(finallyContext.scopeContextStackSize);
+            }
+            if (flipSwitches) {
+                savedSwitchContextStack = m_switchContextStack;
+                m_switchContextStack.shrink(finallyContext.switchContextStackSize);
+            }
+            if (flipForIns) {
+                savedForInContextStack = m_forInContextStack;
+                m_forInContextStack.shrink(finallyContext.forInContextStackSize);
+            }
+            if (flipTries) {
+                while (m_tryContextStack.size() != finallyContext.tryContextStackSize) {
+                    ASSERT(m_tryContextStack.size() > finallyContext.tryContextStackSize);
+                    TryContext context = m_tryContextStack.last();
+                    m_tryContextStack.removeLast();
+                    TryRange range;
+                    range.start = context.start;
+                    range.end = beforeFinally;
+                    range.tryData = context.tryData;
+                    m_tryRanges.append(range);
+                    poppedTryContexts.append(context);
+                }
+            }
+            if (flipLabelScopes) {
+                savedLabelScopes = m_labelScopes;
+                while (m_labelScopes.size() > finallyContext.labelScopesSize)
+                    m_labelScopes.removeLast();
+            }
+            int savedFinallyDepth = m_finallyDepth;
+            m_finallyDepth = finallyContext.finallyDepth;
+            int savedDynamicScopeDepth = m_localScopeDepth;
+            m_localScopeDepth = finallyContext.dynamicScopeDepth;
+            
+            // Emit the finally block.
+            emitNode(finallyContext.finallyBlock);
+            
+            RefPtr<Label> afterFinally = emitLabel(newLabel().get());
+            
+            // Restore the state of the world.
+            if (flipScopes) {
+                m_scopeContextStack = savedScopeContextStack;
+                topScope = &m_scopeContextStack[topScopeIndex]; // assert it's within bounds
+                bottomScope = m_scopeContextStack.begin() + bottomScopeIndex; // don't assert, since the index might be -1.
+            }
+            if (flipSwitches)
+                m_switchContextStack = savedSwitchContextStack;
+            if (flipForIns)
+                m_forInContextStack = savedForInContextStack;
+            if (flipTries) {
+                ASSERT(m_tryContextStack.size() == finallyContext.tryContextStackSize);
+                for (unsigned i = poppedTryContexts.size(); i--;) {
+                    TryContext context = poppedTryContexts[i];
+                    context.start = afterFinally;
+                    m_tryContextStack.append(context);
+                }
+                poppedTryContexts.clear();
+            }
+            if (flipLabelScopes)
+                m_labelScopes = savedLabelScopes;
+            m_finallyDepth = savedFinallyDepth;
+            m_localScopeDepth = savedDynamicScopeDepth;
+            
             --topScope;
         }
     }
-    return emitJump(target);
 }
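// For reference (assumed JS input): this is the machinery behind non-local exits that
// cross finally blocks. For
//
//     for (;;) {
//         try { break; }
//         finally { cleanup(); }
//     }
//
// the `break` cannot simply jump out: the finally body is re-emitted inline at the break
// site, with the generator's context stacks temporarily rolled back to their state from
// when the finally was pushed, before control leaves the loop.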
 
-PassRefPtr<Label> BytecodeGenerator::emitJumpScopes(Label* target, int targetScopeDepth)
+void BytecodeGenerator::emitPopScopes(int targetScopeDepth)
 {
     ASSERT(scopeDepth() - targetScopeDepth >= 0);
-    ASSERT(target->isForward());
 
     size_t scopeDelta = scopeDepth() - targetScopeDepth;
     ASSERT(scopeDelta <= m_scopeContextStack.size());
     if (!scopeDelta)
-        return emitJump(target);
-
-    if (m_finallyDepth)
-        return emitComplexJumpScopes(target, &m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
+        return;
 
-    size_t begin = instructions().size();
+    if (!m_finallyDepth) {
+        while (scopeDelta--)
+            emitOpcode(op_pop_scope);
+        return;
+    }
 
-    emitOpcode(op_jmp_scopes);
-    instructions().append(scopeDelta);
-    instructions().append(target->bind(begin, instructions().size()));
-    return target;
+    emitComplexPopScopes(&m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
 }
 
 RegisterID* BytecodeGenerator::emitGetPropertyNames(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, Label* breakTarget)
@@ -1832,64 +2239,77 @@ RegisterID* BytecodeGenerator::emitNextPropertyName(RegisterID* dst, RegisterID*
     return dst;
 }
 
-RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* start, Label* end)
+TryData* BytecodeGenerator::pushTry(Label* start)
 {
-#if ENABLE(JIT)
-    HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel() };
-#else
-    HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth };
-#endif
+    TryData tryData;
+    tryData.target = newLabel();
+    tryData.targetScopeDepth = UINT_MAX;
+    m_tryData.append(tryData);
+    TryData* result = &m_tryData.last();
+    
+    TryContext tryContext;
+    tryContext.start = start;
+    tryContext.tryData = result;
+    
+    m_tryContextStack.append(tryContext);
+    
+    return result;
+}
+
+RegisterID* BytecodeGenerator::popTryAndEmitCatch(TryData* tryData, RegisterID* targetRegister, Label* end)
+{
+    m_usesExceptions = true;
+    
+    ASSERT_UNUSED(tryData, m_tryContextStack.last().tryData == tryData);
+    
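+    // Close the innermost try context: record its bytecode range for the exception handler table, then bind the handler label here.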
+    TryRange tryRange;
+    tryRange.start = m_tryContextStack.last().start;
+    tryRange.end = end;
+    tryRange.tryData = m_tryContextStack.last().tryData;
+    m_tryRanges.append(tryRange);
+    m_tryContextStack.removeLast();
+    
+    emitLabel(tryRange.tryData->target.get());
+    tryRange.tryData->targetScopeDepth = m_localScopeDepth;
 
-    m_codeBlock->addExceptionHandler(info);
     emitOpcode(op_catch);
     instructions().append(targetRegister->index());
     return targetRegister;
 }
 
-RegisterID* BytecodeGenerator::emitNewError(RegisterID* dst, ErrorType type, JSValue message)
+void BytecodeGenerator::emitThrowReferenceError(const String& message)
 {
-    emitOpcode(op_new_error);
-    instructions().append(dst->index());
-    instructions().append(static_cast<int>(type));
-    instructions().append(addConstantValue(message)->index());
-    return dst;
+    emitOpcode(op_throw_static_error);
+    instructions().append(addConstantValue(addStringConstant(Identifier(m_vm, message)))->index());
+    instructions().append(true);
 }
 
-PassRefPtr<Label> BytecodeGenerator::emitJumpSubroutine(RegisterID* retAddrDst, Label* finally)
+void BytecodeGenerator::emitPushFunctionNameScope(const Identifier& property, RegisterID* value, unsigned attributes)
 {
-    size_t begin = instructions().size();
-
-    emitOpcode(op_jsr);
-    instructions().append(retAddrDst->index());
-    instructions().append(finally->bind(begin, instructions().size()));
-    emitLabel(newLabel().get()); // Record the fact that the next instruction is implicitly labeled, because op_sret will return to it.
-    return finally;
+    emitOpcode(op_push_name_scope);
+    instructions().append(addConstant(property));
+    instructions().append(value->index());
+    instructions().append(attributes);
 }
 
-void BytecodeGenerator::emitSubroutineReturn(RegisterID* retAddrSrc)
+void BytecodeGenerator::emitPushCatchScope(const Identifier& property, RegisterID* value, unsigned attributes)
 {
-    emitOpcode(op_sret);
-    instructions().append(retAddrSrc->index());
-}
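+    // For "try { ... } catch (e) { ... }", |property| is the identifier "e" and |value| holds the caught exception.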
+    createActivationIfNecessary();
 
-void BytecodeGenerator::emitPushNewScope(RegisterID* dst, const Identifier& property, RegisterID* value)
-{
     ControlFlowContext context;
     context.isFinallyBlock = false;
     m_scopeContextStack.append(context);
-    m_dynamicScopeDepth++;
-    
-    createArgumentsIfNecessary();
+    m_localScopeDepth++;
 
-    emitOpcode(op_push_new_scope);
-    instructions().append(dst->index());
+    emitOpcode(op_push_name_scope);
     instructions().append(addConstant(property));
     instructions().append(value->index());
+    instructions().append(attributes);
 }
 
 void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::SwitchType type)
 {
-    SwitchInfo info = { instructions().size(), type };
+    SwitchInfo info = { static_cast<uint32_t>(instructions().size()), type };
     switch (type) {
         case SwitchInfo::SwitchImmediate:
             emitOpcode(op_switch_imm);
@@ -1901,7 +2321,7 @@ void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::S
             emitOpcode(op_switch_string);
             break;
         default:
-            ASSERT_NOT_REACHED();
+            RELEASE_ASSERT_NOT_REACHED();
     }
 
     instructions().append(0); // placeholder for table index
@@ -1922,33 +2342,23 @@ static int32_t keyForImmediateSwitch(ExpressionNode* node, int32_t min, int32_t
     return key - min;
 }
 
-static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
-{
-    jumpTable.min = min;
-    jumpTable.branchOffsets.resize(max - min + 1);
-    jumpTable.branchOffsets.fill(0);
-    for (uint32_t i = 0; i < clauseCount; ++i) {
-        // We're emitting this after the clause labels should have been fixed, so 
-        // the labels should not be "forward" references
-        ASSERT(!labels[i]->isForward());
-        jumpTable.add(keyForImmediateSwitch(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3)); 
-    }
-}
-
 static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t max)
 {
     UNUSED_PARAM(max);
     ASSERT(node->isString());
-    UString::Rep* clause = static_cast<StringNode*>(node)->value().ustring().rep();
-    ASSERT(clause->size() == 1);
+    StringImpl* clause = static_cast<StringNode*>(node)->value().impl();
+    ASSERT(clause->length() == 1);
     
-    int32_t key = clause->data()[0];
+    int32_t key = (*clause)[0];
     ASSERT(key >= min);
     ASSERT(key <= max);
     return key - min;
 }
 
-static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
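+// Shared by immediate and character switches: builds a dense branch-offset table indexed by (key - min), using keyGetter to map each clause expression to its key.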
+static void prepareJumpTableForSwitch(
+    UnlinkedSimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount,
+    RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max,
+    int32_t (*keyGetter)(ExpressionNode*, int32_t min, int32_t max))
 {
     jumpTable.min = min;
     jumpTable.branchOffsets.resize(max - min + 1);
@@ -1957,11 +2367,11 @@ static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32
         // We're emitting this after the clause labels have been bound, so
         // they should no longer be forward references.
         ASSERT(!labels[i]->isForward());
-        jumpTable.add(keyForCharacterSwitch(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3)); 
+        jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3)); 
     }
 }
 
-static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
+static void prepareJumpTableForStringSwitch(UnlinkedStringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
 {
     for (uint32_t i = 0; i < clauseCount; ++i) {
         // We're emitting this after the clause labels should have been fixed, so 
@@ -1969,10 +2379,8 @@ static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t
         ASSERT(!labels[i]->isForward());
         
         ASSERT(nodes[i]->isString());
-        UString::Rep* clause = static_cast<StringNode*>(nodes[i])->value().ustring().rep();
-        OffsetLocation location;
-        location.branchOffset = labels[i]->bind(switchAddress, switchAddress + 3);
-        jumpTable.offsetTable.add(clause, location);
+        StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl();
+        jumpTable.offsetTable.add(clause, labels[i]->bind(switchAddress, switchAddress + 3));
     }
 }
 
@@ -1980,25 +2388,34 @@ void BytecodeGenerator::endSwitch(uint32_t clauseCount, RefPtr<Label>* labels, E
 {
     SwitchInfo switchInfo = m_switchContextStack.last();
     m_switchContextStack.removeLast();
-    if (switchInfo.switchType == SwitchInfo::SwitchImmediate) {
-        instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfImmediateSwitchJumpTables();
+    
+    switch (switchInfo.switchType) {
+    case SwitchInfo::SwitchImmediate:
+    case SwitchInfo::SwitchCharacter: {
+        instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfSwitchJumpTables();
         instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
 
-        SimpleJumpTable& jumpTable = m_codeBlock->addImmediateSwitchJumpTable();
-        prepareJumpTableForImmediateSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max);
-    } else if (switchInfo.switchType == SwitchInfo::SwitchCharacter) {
-        instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfCharacterSwitchJumpTables();
-        instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
+        UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->addSwitchJumpTable();
+        prepareJumpTableForSwitch(
+            jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max,
+            switchInfo.switchType == SwitchInfo::SwitchImmediate
+                ? keyForImmediateSwitch
+                : keyForCharacterSwitch); 
+        break;
+    }
         
-        SimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable();
-        prepareJumpTableForCharacterSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max);
-    } else {
-        ASSERT(switchInfo.switchType == SwitchInfo::SwitchString);
+    case SwitchInfo::SwitchString: {
         instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables();
         instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
 
-        StringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
+        UnlinkedStringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
         prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes);
+        break;
+    }
+        
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        break;
     }
 }
 
@@ -2008,10 +2425,86 @@ RegisterID* BytecodeGenerator::emitThrowExpressionTooDeepException()
     // And we could make the caller pass the node pointer in, if there was some way of getting
     // that from an arbitrary node. However, calling emitExpressionInfo without any useful data
     // is still good enough to get us an accurate line number.
-    emitExpressionInfo(0, 0, 0);
-    RegisterID* exception = emitNewError(newTemporary(), SyntaxError, jsString(globalData(), "Expression too deep"));
-    emitThrow(exception);
-    return exception;
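+    // Rather than emitting a throw inline, just record the failure; code generation reports the error when it completes.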
+    m_expressionTooDeep = true;
+    return newTemporary();
+}
+
+void BytecodeGenerator::setIsNumericCompareFunction(bool isNumericCompareFunction)
+{
+    m_codeBlock->setIsNumericCompareFunction(isNumericCompareFunction);
+}
+
+bool BytecodeGenerator::isArgumentNumber(const Identifier& ident, int argumentNumber)
+{
+    RegisterID* registerID = local(ident).get();
+    if (!registerID || registerID->index() >= 0)
+        return false;
+    return registerID->index() == CallFrame::argumentOffset(argumentNumber);
+}
+
+void BytecodeGenerator::emitReadOnlyExceptionIfNeeded()
+{
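+    // Only strict mode turns writes to read-only properties into exceptions; sloppy-mode writes fail silently.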
+    if (!isStrictMode())
+        return;
+    emitOpcode(op_throw_static_error);
+    instructions().append(addConstantValue(addStringConstant(Identifier(m_vm, StrictModeReadonlyPropertyWriteError)))->index());
+    instructions().append(false);
+}
+    
+void BytecodeGenerator::emitEnumeration(ThrowableExpressionData* node, ExpressionNode* subjectNode, const std::function<void(BytecodeGenerator&, RegisterID*)>& callBack)
+{
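+    // Fast path: the subject is the arguments object, so read each argument by index instead of going through the iterator protocol.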
+    if (subjectNode->isResolveNode()
+        && willResolveToArguments(static_cast<ResolveNode*>(subjectNode)->identifier())
+        && !symbolTable().slowArguments()) {
+        RefPtr<RegisterID> index = emitLoad(newTemporary(), jsNumber(0));
+
+        LabelScopePtr scope = newLabelScope(LabelScope::Loop);
+        RefPtr<RegisterID> value = emitLoad(newTemporary(), jsUndefined());
+        
+        RefPtr<Label> loopCondition = newLabel();
+        RefPtr<Label> loopStart = newLabel();
+        emitJump(loopCondition.get());
+        emitLabel(loopStart.get());
+        emitLoopHint();
+        emitGetArgumentByVal(value.get(), uncheckedRegisterForArguments(), index.get());
+        callBack(*this, value.get());
+    
+        emitLabel(scope->continueTarget());
+        emitInc(index.get());
+        emitLabel(loopCondition.get());
+        RefPtr<RegisterID> length = emitGetArgumentsLength(newTemporary(), uncheckedRegisterForArguments());
+        emitJumpIfTrue(emitEqualityOp(op_less, newTemporary(), index.get(), length.get()), loopStart.get());
+        emitLabel(scope->breakTarget());
+        return;
+    }
+
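+    // General path: fetch the subject's @iterator, call it to get an iterator, then call the iterator's next function until it returns the VM's iteration terminator.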
+    LabelScopePtr scope = newLabelScope(LabelScope::Loop);
+    RefPtr<RegisterID> subject = newTemporary();
+    emitNode(subject.get(), subjectNode);
+    RefPtr<RegisterID> iterator = emitGetById(newTemporary(), subject.get(), propertyNames().iteratorPrivateName);
+    {
+        CallArguments args(*this, 0);
+        emitMove(args.thisRegister(), subject.get());
+        emitCall(iterator.get(), iterator.get(), NoExpectedFunction, args, node->divot(), node->divotStart(), node->divotEnd());
+    }
+    RefPtr<RegisterID> iteratorNext = emitGetById(newTemporary(), iterator.get(), propertyNames().iteratorNextPrivateName);
+    RefPtr<RegisterID> value = newTemporary();
+    emitLoad(value.get(), jsUndefined());
+    
+    emitJump(scope->continueTarget());
+    
+    RefPtr<Label> loopStart = newLabel();
+    emitLabel(loopStart.get());
+    emitLoopHint();
+    callBack(*this, value.get());
+    emitLabel(scope->continueTarget());
+    CallArguments nextArguments(*this, 0, 1);
+    emitMove(nextArguments.thisRegister(), iterator.get());
+    emitMove(nextArguments.argumentRegister(0), value.get());
+    emitCall(value.get(), iteratorNext.get(), NoExpectedFunction, nextArguments, node->divot(), node->divotStart(), node->divotEnd());
+    RefPtr<RegisterID> result = newTemporary();
+    emitJumpIfFalse(emitEqualityOp(op_stricteq, result.get(), value.get(), emitLoad(0, JSValue(vm()->iterationTerminator.get()))), loopStart.get());
+    emitLabel(scope->breakTarget());
 }
 
 } // namespace JSC