X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/9dae56ea45a0f5f8136a5c93d6f3a7f99399ca73..ef99ff287df9046eb88937225e0554eabb00e33c:/bytecompiler/BytecodeGenerator.cpp?ds=sidebyside diff --git a/bytecompiler/BytecodeGenerator.cpp b/bytecompiler/BytecodeGenerator.cpp index cd89c1e..734546a 100644 --- a/bytecompiler/BytecodeGenerator.cpp +++ b/bytecompiler/BytecodeGenerator.cpp @@ -1,6 +1,7 @@ /* - * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich + * Copyright (C) 2012 Igalia, S.L. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -11,7 +12,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -30,431 +31,532 @@ #include "config.h" #include "BytecodeGenerator.h" -#include "BatchedTransitionOptimizer.h" -#include "JSFunction.h" #include "Interpreter.h" -#include "UString.h" +#include "JSActivation.h" +#include "JSFunction.h" +#include "JSNameScope.h" +#include "LowLevelInterpreter.h" +#include "JSCInlines.h" +#include "Options.h" +#include "StackAlignment.h" +#include "StrongInlines.h" +#include "UnlinkedCodeBlock.h" +#include "UnlinkedInstructionStream.h" +#include +#include using namespace std; namespace JSC { -/* - The layout of a register frame looks like this: - - For - - function f(x, y) { - var v1; - function g() { } - var v2; - return (x) * (y); - } - - assuming (x) and (y) generated temporaries t1 and t2, you would have - - ------------------------------------ - | x | y | g | v2 | v1 | t1 | t2 | <-- value held - ------------------------------------ - | -5 | -4 | -3 | -2 | -1 | +0 | +1 | <-- register index - ------------------------------------ - | params->|<-locals | temps-> - - Because temporary registers are allocated in a stack-like fashion, we - can reclaim them with a simple popping algorithm. The same goes for labels. - (We never reclaim parameter or local registers, because parameters and - locals are DontDelete.) - - The register layout before a function call looks like this: - - For - - function f(x, y) - { - } - - f(1); - - > <------------------------------ - < > reserved: call frame | 1 | <-- value held - > >snip< <------------------------------ - < > +0 | +1 | +2 | +3 | +4 | +5 | <-- register index - > <------------------------------ - | params->|<-locals | temps-> - - The call instruction fills in the "call frame" registers. It also pads - missing arguments at the end of the call: - - > <----------------------------------- - < > reserved: call frame | 1 | ? | <-- value held ("?" stands for "undefined") - > >snip< <----------------------------------- - < > +0 | +1 | +2 | +3 | +4 | +5 | +6 | <-- register index - > <----------------------------------- - | params->|<-locals | temps-> - - After filling in missing arguments, the call instruction sets up the new - stack frame to overlap the end of the old stack frame: - - |----------------------------------> < - | reserved: call frame | 1 | ? 
< > <-- value held ("?" stands for "undefined") - |----------------------------------> >snip< < - | -7 | -6 | -5 | -4 | -3 | -2 | -1 < > <-- register index - |----------------------------------> < - | | params->|<-locals | temps-> - - That way, arguments are "copied" into the callee's stack frame for free. - - If the caller supplies too many arguments, this trick doesn't work. The - extra arguments protrude into space reserved for locals and temporaries. - In that case, the call instruction makes a real copy of the call frame header, - along with just the arguments expected by the callee, leaving the original - call frame header and arguments behind. (The call instruction can't just discard - extra arguments, because the "arguments" object may access them later.) - This copying strategy ensures that all named values will be at the indices - expected by the callee. -*/ - -#ifndef NDEBUG -static bool s_dumpsGeneratedCode = false; -#endif - -void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode) +void Label::setLocation(unsigned location) { -#ifndef NDEBUG - s_dumpsGeneratedCode = dumpsGeneratedCode; -#else - UNUSED_PARAM(dumpsGeneratedCode); -#endif -} - -bool BytecodeGenerator::dumpsGeneratedCode() -{ -#ifndef NDEBUG - return s_dumpsGeneratedCode; -#else - return false; -#endif + m_location = location; + + unsigned size = m_unresolvedJumps.size(); + for (unsigned i = 0; i < size; ++i) + m_generator->m_instructions[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first; } -void BytecodeGenerator::generate() +ParserError BytecodeGenerator::generate() { - m_codeBlock->setThisRegister(m_thisRegister.index()); + SamplingRegion samplingRegion("Bytecode Generation"); + + m_codeBlock->setThisRegister(m_thisRegister.virtualRegister()); + for (size_t i = 0; i < m_deconstructedParameters.size(); i++) { + auto& entry = m_deconstructedParameters[i]; + entry.second->bindValue(*this, entry.first.get()); + } m_scopeNode->emitBytecode(*this); -#ifndef NDEBUG - m_codeBlock->setInstructionCount(m_codeBlock->instructions().size()); + m_staticPropertyAnalyzer.kill(); - if (s_dumpsGeneratedCode) - m_codeBlock->dump(m_scopeChain->globalObject()->globalExec()); -#endif - - if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode) - symbolTable().clear(); + for (unsigned i = 0; i < m_tryRanges.size(); ++i) { + TryRange& range = m_tryRanges[i]; + int start = range.start->bind(); + int end = range.end->bind(); - m_codeBlock->setIsNumericCompareFunction(instructions() == m_globalData->numericCompareFunction(m_scopeChain->globalObject()->globalExec())); - -#if !ENABLE(OPCODE_SAMPLING) - if (!m_regeneratingForExceptionInfo && (m_codeType == FunctionCode || m_codeType == EvalCode)) - m_codeBlock->clearExceptionInfo(); -#endif + // This will happen for empty try blocks and for some cases of finally blocks: + // + // try { + // try { + // } finally { + // return 42; + // // *HERE* + // } + // } finally { + // print("things"); + // } + // + // The return will pop scopes to execute the outer finally block. But this includes + // popping the try context for the inner try. The try context is live in the fall-through + // part of the finally block not because we will emit a handler that overlaps the finally, + // but because we haven't yet had a chance to plant the catch target. Then when we finish + // emitting code for the outer finally block, we repush the try contex, this time with a + // new start index. 
But that means that the start index for the try range corresponding + // to the inner-finally-following-the-return (marked as "*HERE*" above) will be greater + // than the end index of the try block. This is harmless since end < start handlers will + // never get matched in our logic, but we do the runtime a favor and choose to not emit + // such handlers at all. + if (end <= start) + continue; + + ASSERT(range.tryData->targetScopeDepth != UINT_MAX); + UnlinkedHandlerInfo info = { + static_cast(start), static_cast(end), + static_cast(range.tryData->target->bind()), + range.tryData->targetScopeDepth + }; + m_codeBlock->addExceptionHandler(info); + } + + m_codeBlock->setInstructions(std::make_unique(m_instructions)); m_codeBlock->shrinkToFit(); -} -bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0) -{ - int index = m_calleeRegisters.size(); - SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0); - pair result = symbolTable().add(ident.ustring().rep(), newEntry); + if (m_codeBlock->symbolTable()) + m_codeBlock->setSymbolTable(m_codeBlock->symbolTable()->cloneCapturedNames(*m_codeBlock->vm())); - if (!result.second) { - r0 = ®isterFor(result.first->second.getIndex()); - return false; - } - - ++m_codeBlock->m_numVars; - r0 = newRegister(); - return true; + if (m_expressionTooDeep) + return ParserError(ParserError::OutOfMemory); + return ParserError(ParserError::ErrorNone); } -bool BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant, RegisterID*& r0) +bool BytecodeGenerator::addVar( + const Identifier& ident, ConstantMode constantMode, WatchMode watchMode, RegisterID*& r0) { - int index = m_nextGlobalIndex; - SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0); - pair result = symbolTable().add(ident.ustring().rep(), newEntry); + ASSERT(static_cast(m_codeBlock->m_numVars) == m_calleeRegisters.size()); + + ConcurrentJITLocker locker(symbolTable().m_lock); + int index = virtualRegisterForLocal(m_calleeRegisters.size()).offset(); + SymbolTableEntry newEntry(index, constantMode == IsConstant ? 
ReadOnly : 0); + SymbolTable::Map::AddResult result = symbolTable().add(locker, ident.impl(), newEntry); - if (!result.second) - index = result.first->second.getIndex(); - else { - --m_nextGlobalIndex; - m_globals.append(index + m_globalVarStorageOffset); + if (!result.isNewEntry) { + r0 = ®isterFor(result.iterator->value.getIndex()); + return false; } - - r0 = ®isterFor(index); - return result.second; + + if (watchMode == IsWatchable) { + while (m_watchableVariables.size() < static_cast(m_codeBlock->m_numVars)) + m_watchableVariables.append(Identifier()); + m_watchableVariables.append(ident); + } + + r0 = addVar(); + + ASSERT(watchMode == NotWatchable || static_cast(m_codeBlock->m_numVars) == m_watchableVariables.size()); + + return true; } -void BytecodeGenerator::allocateConstants(size_t count) +void BytecodeGenerator::preserveLastVar() { - m_codeBlock->m_numConstants = count; - if (!count) - return; - - m_nextConstantIndex = m_calleeRegisters.size(); - - for (size_t i = 0; i < count; ++i) - newRegister(); - m_lastConstant = &m_calleeRegisters.last(); + if ((m_firstConstantIndex = m_calleeRegisters.size()) != 0) + m_lastVar = &m_calleeRegisters.last(); } -BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock) - : m_shouldEmitDebugHooks(!!debugger) - , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling()) - , m_scopeChain(&scopeChain) - , m_symbolTable(symbolTable) +BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode) + : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn) + , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn) + , m_symbolTable(0) , m_scopeNode(programNode) - , m_codeBlock(codeBlock) - , m_thisRegister(RegisterFile::ProgramCodeThisRegister) + , m_codeBlock(vm, codeBlock) + , m_thisRegister(CallFrame::thisArgumentOffset()) + , m_activationRegister(0) + , m_emptyValueRegister(0) + , m_globalObjectRegister(0) , m_finallyDepth(0) - , m_dynamicScopeDepth(0) - , m_baseScopeDepth(0) + , m_localScopeDepth(0) , m_codeType(GlobalCode) - , m_nextGlobalIndex(-1) - , m_globalData(&scopeChain.globalObject()->globalExec()->globalData()) + , m_nextConstantOffset(0) + , m_globalConstantIndex(0) + , m_firstLazyFunction(0) + , m_lastLazyFunction(0) + , m_staticPropertyAnalyzer(&m_instructions) + , m_vm(&vm) , m_lastOpcodeID(op_end) - , m_emitNodeDepth(0) - , m_regeneratingForExceptionInfo(false) - , m_codeBlockBeingRegeneratedFrom(0) +#ifndef NDEBUG + , m_lastOpcodePosition(0) +#endif + , m_usesExceptions(false) + , m_expressionTooDeep(false) + , m_isBuiltinFunction(false) { - if (m_shouldEmitDebugHooks) - m_codeBlock->setNeedsFullScopeChain(true); + m_codeBlock->setNumParameters(1); // Allocate space for "this" emitOpcode(op_enter); - codeBlock->setGlobalData(m_globalData); - - // FIXME: Move code that modifies the global object to Interpreter::execute. - - m_codeBlock->m_numParameters = 1; // Allocate space for "this" - - JSGlobalObject* globalObject = scopeChain.globalObject(); - ExecState* exec = globalObject->globalExec(); - RegisterFile* registerFile = &exec->globalData().interpreter->registerFile(); - - // Shift register indexes in generated code to elide registers allocated by intermediate stack frames. 
- m_globalVarStorageOffset = -RegisterFile::CallFrameHeaderSize - m_codeBlock->m_numParameters - registerFile->size(); - - // Add previously defined symbols to bookkeeping. - m_globals.grow(symbolTable->size()); - SymbolTable::iterator end = symbolTable->end(); - for (SymbolTable::iterator it = symbolTable->begin(); it != end; ++it) - registerFor(it->second.getIndex()).setIndex(it->second.getIndex() + m_globalVarStorageOffset); - - BatchedTransitionOptimizer optimizer(globalObject); const VarStack& varStack = programNode->varStack(); const FunctionStack& functionStack = programNode->functionStack(); - bool canOptimizeNewGlobals = symbolTable->size() + functionStack.size() + varStack.size() < registerFile->maxGlobals(); - if (canOptimizeNewGlobals) { - // Shift new symbols so they get stored prior to existing symbols. - m_nextGlobalIndex -= symbolTable->size(); - for (size_t i = 0; i < functionStack.size(); ++i) { - FuncDeclNode* funcDecl = functionStack[i].get(); - globalObject->removeDirect(funcDecl->m_ident); // Make sure our new function is not shadowed by an old property. - emitNewFunction(addGlobalVar(funcDecl->m_ident, false), funcDecl); - } - - Vector newVars; - for (size_t i = 0; i < varStack.size(); ++i) - if (!globalObject->hasProperty(exec, varStack[i].first)) - newVars.append(addGlobalVar(varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant)); - - allocateConstants(programNode->neededConstants()); + for (size_t i = 0; i < functionStack.size(); ++i) { + FunctionBodyNode* function = functionStack[i]; + UnlinkedFunctionExecutable* unlinkedFunction = makeFunction(function); + codeBlock->addFunctionDeclaration(*m_vm, function->ident(), unlinkedFunction); + } - for (size_t i = 0; i < newVars.size(); ++i) - emitLoad(newVars[i], jsUndefined()); - } else { - for (size_t i = 0; i < functionStack.size(); ++i) { - FuncDeclNode* funcDecl = functionStack[i].get(); - globalObject->putWithAttributes(exec, funcDecl->m_ident, funcDecl->makeFunction(exec, scopeChain.node()), DontDelete); - } - for (size_t i = 0; i < varStack.size(); ++i) { - if (globalObject->hasProperty(exec, varStack[i].first)) - continue; - int attributes = DontDelete; - if (varStack[i].second & DeclarationStacks::IsConstant) - attributes |= ReadOnly; - globalObject->putWithAttributes(exec, varStack[i].first, jsUndefined(), attributes); - } + for (size_t i = 0; i < varStack.size(); ++i) + codeBlock->addVariableDeclaration(varStack[i].first, !!(varStack[i].second & DeclarationStacks::IsConstant)); - allocateConstants(programNode->neededConstants()); - } } -BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock) - : m_shouldEmitDebugHooks(!!debugger) - , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling()) - , m_scopeChain(&scopeChain) - , m_symbolTable(symbolTable) +BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode) + : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn) + , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn) + , m_symbolTable(codeBlock->symbolTable()) , m_scopeNode(functionBody) - , m_codeBlock(codeBlock) + , m_codeBlock(vm, codeBlock) + , m_activationRegister(0) + , m_emptyValueRegister(0) + , m_globalObjectRegister(0) , m_finallyDepth(0) - , 
m_dynamicScopeDepth(0) - , m_baseScopeDepth(0) + , m_localScopeDepth(0) , m_codeType(FunctionCode) - , m_globalData(&scopeChain.globalObject()->globalExec()->globalData()) + , m_nextConstantOffset(0) + , m_globalConstantIndex(0) + , m_firstLazyFunction(0) + , m_lastLazyFunction(0) + , m_staticPropertyAnalyzer(&m_instructions) + , m_vm(&vm) , m_lastOpcodeID(op_end) - , m_emitNodeDepth(0) - , m_regeneratingForExceptionInfo(false) - , m_codeBlockBeingRegeneratedFrom(0) -{ - if (m_shouldEmitDebugHooks) - m_codeBlock->setNeedsFullScopeChain(true); +#ifndef NDEBUG + , m_lastOpcodePosition(0) +#endif + , m_usesExceptions(false) + , m_expressionTooDeep(false) + , m_isBuiltinFunction(codeBlock->isBuiltinFunction()) +{ + if (m_isBuiltinFunction) + m_shouldEmitDebugHooks = false; + + m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode()); + Vector boundParameterProperties; + FunctionParameters& parameters = *functionBody->parameters(); + for (size_t i = 0; i < parameters.size(); i++) { + auto pattern = parameters.at(i); + if (pattern->isBindingNode()) + continue; + pattern->collectBoundIdentifiers(boundParameterProperties); + continue; + } + m_symbolTable->setParameterCountIncludingThis(functionBody->parameters()->size() + 1); + + emitOpcode(op_enter); + if (m_codeBlock->needsFullScopeChain() || m_shouldEmitDebugHooks) { + m_activationRegister = addVar(); + emitInitLazyRegister(m_activationRegister); + m_codeBlock->setActivationRegister(m_activationRegister->virtualRegister()); + } + + m_symbolTable->setCaptureStart(virtualRegisterForLocal(m_codeBlock->m_numVars).offset()); + + if (functionBody->usesArguments() || codeBlock->usesEval()) { // May reify arguments object. + RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code. + RegisterID* argumentsRegister = addVar(propertyNames().arguments, IsVariable, NotWatchable); // Can be changed by assigning to 'arguments'. - codeBlock->setGlobalData(m_globalData); + // We can save a little space by hard-coding the knowledge that the two + // 'arguments' values are stored in consecutive registers, and storing + // only the index of the assignable one. 
+ codeBlock->setArgumentsRegister(argumentsRegister->virtualRegister()); + ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->virtualRegister() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister())); - bool usesArguments = functionBody->usesArguments(); - codeBlock->setUsesArguments(usesArguments); - if (usesArguments) { - m_argumentsRegister.setIndex(RegisterFile::OptionalCalleeArguments); - addVar(propertyNames().arguments, false); + emitInitLazyRegister(argumentsRegister); + emitInitLazyRegister(unmodifiedArgumentsRegister); + + if (shouldTearOffArgumentsEagerly()) { + emitOpcode(op_create_arguments); + instructions().append(argumentsRegister->index()); + } } - if (m_codeBlock->needsFullScopeChain()) { - ++m_codeBlock->m_numVars; - m_activationRegisterIndex = newRegister()->index(); - emitOpcode(op_enter_with_activation); - instructions().append(m_activationRegisterIndex); - } else - emitOpcode(op_enter); + bool shouldCaptureAllTheThings = m_shouldEmitDebugHooks || codeBlock->usesEval(); + + bool capturesAnyArgumentByName = false; + Vector capturedArguments; + if (functionBody->hasCapturedVariables() || shouldCaptureAllTheThings) { + FunctionParameters& parameters = *functionBody->parameters(); + capturedArguments.resize(parameters.size()); + for (size_t i = 0; i < parameters.size(); ++i) { + capturedArguments[i] = 0; + auto pattern = parameters.at(i); + if (!pattern->isBindingNode()) + continue; + const Identifier& ident = static_cast(pattern)->boundProperty(); + if (!functionBody->captures(ident) && !shouldCaptureAllTheThings) + continue; + capturesAnyArgumentByName = true; + capturedArguments[i] = addVar(); + } + } - if (usesArguments) - emitOpcode(op_create_arguments); + if (capturesAnyArgumentByName && !shouldTearOffArgumentsEagerly()) { + size_t parameterCount = m_symbolTable->parameterCount(); + auto slowArguments = std::make_unique(parameterCount); + for (size_t i = 0; i < parameterCount; ++i) { + if (!capturedArguments[i]) { + ASSERT(slowArguments[i].status == SlowArgument::Normal); + slowArguments[i].index = CallFrame::argumentOffset(i); + continue; + } + slowArguments[i].status = SlowArgument::Captured; + slowArguments[i].index = capturedArguments[i]->index(); + } + m_symbolTable->setSlowArguments(WTF::move(slowArguments)); + } + + RegisterID* calleeRegister = resolveCallee(functionBody); // May push to the scope chain and/or add a captured var. const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack(); + const DeclarationStacks::VarStack& varStack = functionBody->varStack(); + IdentifierSet test; + + // Captured variables and functions go first so that activations don't have + // to step over the non-captured locals to mark them. + if (functionBody->hasCapturedVariables()) { + for (size_t i = 0; i < boundParameterProperties.size(); i++) { + const Identifier& ident = boundParameterProperties[i]; + if (functionBody->captures(ident)) + addVar(ident, IsVariable, IsWatchable); + } + for (size_t i = 0; i < functionStack.size(); ++i) { + FunctionBodyNode* function = functionStack[i]; + const Identifier& ident = function->ident(); + if (functionBody->captures(ident)) { + m_functions.add(ident.impl()); + emitNewFunction(addVar(ident, IsVariable, IsWatchable), IsCaptured, function); + } + } + for (size_t i = 0; i < varStack.size(); ++i) { + const Identifier& ident = varStack[i].first; + if (functionBody->captures(ident)) + addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? 
IsConstant : IsVariable, IsWatchable); + } + } + + m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset()); + + bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks; + m_firstLazyFunction = codeBlock->m_numVars; for (size_t i = 0; i < functionStack.size(); ++i) { - FuncDeclNode* funcDecl = functionStack[i].get(); - const Identifier& ident = funcDecl->m_ident; - m_functions.add(ident.ustring().rep()); - emitNewFunction(addVar(ident, false), funcDecl); + FunctionBodyNode* function = functionStack[i]; + const Identifier& ident = function->ident(); + if (!functionBody->captures(ident)) { + m_functions.add(ident.impl()); + RefPtr reg = addVar(ident, IsVariable, NotWatchable); + // Don't lazily create functions that override the name 'arguments' + // as this would complicate lazy instantiation of actual arguments. + if (!canLazilyCreateFunctions || ident == propertyNames().arguments) + emitNewFunction(reg.get(), NotCaptured, function); + else { + emitInitLazyRegister(reg.get()); + m_lazyFunctions.set(reg->virtualRegister().toLocal(), function); + } + } + } + m_lastLazyFunction = canLazilyCreateFunctions ? codeBlock->m_numVars : m_firstLazyFunction; + for (size_t i = 0; i < boundParameterProperties.size(); i++) { + const Identifier& ident = boundParameterProperties[i]; + if (!functionBody->captures(ident)) + addVar(ident, IsVariable, IsWatchable); + } + for (size_t i = 0; i < varStack.size(); ++i) { + const Identifier& ident = varStack[i].first; + if (!functionBody->captures(ident)) + addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, NotWatchable); } - const DeclarationStacks::VarStack& varStack = functionBody->varStack(); - for (size_t i = 0; i < varStack.size(); ++i) - addVar(varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant); + if (shouldCaptureAllTheThings) + m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset()); - const Identifier* parameters = functionBody->parameters(); - size_t parameterCount = functionBody->parameterCount(); - m_nextParameterIndex = -RegisterFile::CallFrameHeaderSize - parameterCount - 1; - m_parameters.grow(1 + parameterCount); // reserve space for "this" + if (m_symbolTable->captureCount()) + emitOpcode(op_touch_entry); + + m_parameters.grow(parameters.size() + 1); // reserve space for "this" // Add "this" as a parameter - m_thisRegister.setIndex(m_nextParameterIndex); - ++m_nextParameterIndex; - ++m_codeBlock->m_numParameters; - - if (functionBody->usesThis() || m_shouldEmitDebugHooks) { - emitOpcode(op_convert_this); - instructions().append(m_thisRegister.index()); + int nextParameterIndex = CallFrame::thisArgumentOffset(); + m_thisRegister.setIndex(nextParameterIndex++); + m_codeBlock->addParameter(); + for (size_t i = 0; i < parameters.size(); ++i, ++nextParameterIndex) { + int index = nextParameterIndex; + auto pattern = parameters.at(i); + if (!pattern->isBindingNode()) { + m_codeBlock->addParameter(); + RegisterID& parameter = registerFor(index); + parameter.setIndex(index); + m_deconstructedParameters.append(std::make_pair(¶meter, pattern)); + continue; + } + auto simpleParameter = static_cast(pattern); + if (capturedArguments.size() && capturedArguments[i]) { + ASSERT((functionBody->hasCapturedVariables() && functionBody->captures(simpleParameter->boundProperty())) || shouldCaptureAllTheThings); + index = capturedArguments[i]->index(); + RegisterID original(nextParameterIndex); + 
emitMove(capturedArguments[i], &original); + } + addParameter(simpleParameter->boundProperty(), index); } - - for (size_t i = 0; i < parameterCount; ++i) - addParameter(parameters[i]); + preserveLastVar(); + + // We declare the callee's name last because it should lose to a var, function, and/or parameter declaration. + addCallee(functionBody, calleeRegister); - allocateConstants(functionBody->neededConstants()); + if (isConstructor()) { + emitCreateThis(&m_thisRegister); + } else if (functionBody->usesThis() || codeBlock->usesEval()) { + m_codeBlock->addPropertyAccessInstruction(instructions().size()); + emitOpcode(op_to_this); + instructions().append(kill(&m_thisRegister)); + instructions().append(0); + } } -BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock) - : m_shouldEmitDebugHooks(!!debugger) - , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling()) - , m_scopeChain(&scopeChain) - , m_symbolTable(symbolTable) +BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode) + : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn) + , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn) + , m_symbolTable(codeBlock->symbolTable()) , m_scopeNode(evalNode) - , m_codeBlock(codeBlock) - , m_thisRegister(RegisterFile::ProgramCodeThisRegister) + , m_codeBlock(vm, codeBlock) + , m_thisRegister(CallFrame::thisArgumentOffset()) + , m_activationRegister(0) + , m_emptyValueRegister(0) + , m_globalObjectRegister(0) , m_finallyDepth(0) - , m_dynamicScopeDepth(0) - , m_baseScopeDepth(codeBlock->baseScopeDepth()) + , m_localScopeDepth(0) , m_codeType(EvalCode) - , m_globalData(&scopeChain.globalObject()->globalExec()->globalData()) + , m_nextConstantOffset(0) + , m_globalConstantIndex(0) + , m_firstLazyFunction(0) + , m_lastLazyFunction(0) + , m_staticPropertyAnalyzer(&m_instructions) + , m_vm(&vm) , m_lastOpcodeID(op_end) - , m_emitNodeDepth(0) - , m_regeneratingForExceptionInfo(false) - , m_codeBlockBeingRegeneratedFrom(0) +#ifndef NDEBUG + , m_lastOpcodePosition(0) +#endif + , m_usesExceptions(false) + , m_expressionTooDeep(false) + , m_isBuiltinFunction(false) { - if (m_shouldEmitDebugHooks || m_baseScopeDepth) - m_codeBlock->setNeedsFullScopeChain(true); + m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode()); + m_codeBlock->setNumParameters(1); emitOpcode(op_enter); - codeBlock->setGlobalData(m_globalData); - m_codeBlock->m_numParameters = 1; // Allocate space for "this" - allocateConstants(evalNode->neededConstants()); + const DeclarationStacks::FunctionStack& functionStack = evalNode->functionStack(); + for (size_t i = 0; i < functionStack.size(); ++i) + m_codeBlock->addFunctionDecl(makeFunction(functionStack[i])); + + const DeclarationStacks::VarStack& varStack = evalNode->varStack(); + unsigned numVariables = varStack.size(); + Vector variables; + variables.reserveCapacity(numVariables); + for (size_t i = 0; i < numVariables; ++i) { + ASSERT(varStack[i].first.impl()->isAtomic()); + variables.append(varStack[i].first); + } + codeBlock->adoptVariables(variables); + preserveLastVar(); +} + +BytecodeGenerator::~BytecodeGenerator() +{ +} + +RegisterID* BytecodeGenerator::emitInitLazyRegister(RegisterID* reg) +{ + emitOpcode(op_init_lazy_reg); + 
instructions().append(reg->index());
+    ASSERT(!hasWatchableVariable(reg->index()));
+    return reg;
 }
 
-RegisterID* BytecodeGenerator::addParameter(const Identifier& ident)
+RegisterID* BytecodeGenerator::resolveCallee(FunctionBodyNode* functionBodyNode)
+{
+    if (!functionNameIsInScope(functionBodyNode->ident(), functionBodyNode->functionMode()))
+        return 0;
+
+    if (functionNameScopeIsDynamic(m_codeBlock->usesEval(), m_codeBlock->isStrictMode()))
+        return 0;
+
+    m_calleeRegister.setIndex(JSStack::Callee);
+    if (functionBodyNode->captures(functionBodyNode->ident()))
+        return emitMove(addVar(), IsCaptured, &m_calleeRegister);
+
+    return &m_calleeRegister;
+}
+
+void BytecodeGenerator::addCallee(FunctionBodyNode* functionBodyNode, RegisterID* calleeRegister)
+{
+    if (!calleeRegister)
+        return;
+
+    symbolTable().add(functionBodyNode->ident().impl(), SymbolTableEntry(calleeRegister->index(), ReadOnly));
+}
+
+void BytecodeGenerator::addParameter(const Identifier& ident, int parameterIndex)
 {
     // Parameters overwrite var declarations, but not function declarations.
-    RegisterID* result = 0;
-    UString::Rep* rep = ident.ustring().rep();
+    StringImpl* rep = ident.impl();
     if (!m_functions.contains(rep)) {
-        symbolTable().set(rep, m_nextParameterIndex);
-        RegisterID& parameter = registerFor(m_nextParameterIndex);
-        parameter.setIndex(m_nextParameterIndex);
-        result = &parameter;
+        symbolTable().set(rep, parameterIndex);
+        RegisterID& parameter = registerFor(parameterIndex);
+        parameter.setIndex(parameterIndex);
     }
 
     // To maintain the calling convention, we have to allocate unique space for
     // each parameter, even if the parameter doesn't make it into the symbol table.
-    ++m_nextParameterIndex;
-    ++m_codeBlock->m_numParameters;
-    return result;
+    m_codeBlock->addParameter();
 }
 
-RegisterID* BytecodeGenerator::registerFor(const Identifier& ident)
+bool BytecodeGenerator::willResolveToArguments(const Identifier& ident)
 {
-    if (ident == propertyNames().thisIdentifier)
-        return &m_thisRegister;
-
+    if (ident != propertyNames().arguments)
+        return false;
+
     if (!shouldOptimizeLocals())
-        return 0;
-
-    SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
+        return false;
+
+    SymbolTableEntry entry = symbolTable().get(ident.impl());
     if (entry.isNull())
-        return 0;
+        return false;
 
-    return &registerFor(entry.getIndex());
+    if (m_codeBlock->usesArguments() && m_codeType == FunctionCode)
+        return true;
+
+    return false;
 }
 
-RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
+RegisterID* BytecodeGenerator::uncheckedRegisterForArguments()
 {
-    if (m_codeType == EvalCode)
-        return 0;
+    ASSERT(willResolveToArguments(propertyNames().arguments));
 
-    SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
+    SymbolTableEntry entry = symbolTable().get(propertyNames().arguments.impl());
     ASSERT(!entry.isNull());
-
     return &registerFor(entry.getIndex());
 }
 
-bool BytecodeGenerator::isLocal(const Identifier& ident)
+RegisterID* BytecodeGenerator::createLazyRegisterIfNecessary(RegisterID* reg)
 {
-    if (ident == propertyNames().thisIdentifier)
-        return true;
-
-    return shouldOptimizeLocals() && symbolTable().contains(ident.ustring().rep());
-}
+    if (!reg->virtualRegister().isLocal())
+        return reg;
 
-bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
-{
-    return symbolTable().get(ident.ustring().rep()).isReadOnly();
+    int localVariableNumber = reg->virtualRegister().toLocal();
+
+    if (m_lastLazyFunction <= localVariableNumber || localVariableNumber < m_firstLazyFunction)
+        return reg;
+    emitLazyNewFunction(reg, m_lazyFunctions.get(localVariableNumber));
+    return reg;
 }
 
 RegisterID* BytecodeGenerator::newRegister()
 {
-    m_calleeRegisters.append(m_calleeRegisters.size());
-    m_codeBlock->m_numCalleeRegisters = max(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+    m_calleeRegisters.append(virtualRegisterForLocal(m_calleeRegisters.size()));
+    int numCalleeRegisters = max(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+    numCalleeRegisters = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numCalleeRegisters);
+    m_codeBlock->m_numCalleeRegisters = numCalleeRegisters;
     return &m_calleeRegisters.last();
 }
 
@@ -469,24 +571,16 @@ RegisterID* BytecodeGenerator::newTemporary()
     return result;
 }
 
-RegisterID* BytecodeGenerator::highestUsedRegister()
-{
-    size_t count = m_codeBlock->m_numCalleeRegisters;
-    while (m_calleeRegisters.size() < count)
-        newRegister();
-    return &m_calleeRegisters.last();
-}
-
-PassRefPtr BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
+LabelScopePtr BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
 {
     // Reclaim free label scopes.
     while (m_labelScopes.size() && !m_labelScopes.last().refCount())
        m_labelScopes.removeLast();
 
     // Allocate new label scope.
-    LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : 0); // Only loops have continue targets.
+    LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : PassRefPtr