/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) 2012 Igalia, S.L.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
#include "config.h"
#include "BytecodeGenerator.h"
-#include "BatchedTransitionOptimizer.h"
-#include "JSFunction.h"
+#include "BuiltinExecutables.h"
#include "Interpreter.h"
-#include "UString.h"
+#include "JSFunction.h"
+#include "JSLexicalEnvironment.h"
+#include "JSNameScope.h"
+#include "JSTemplateRegistryKey.h"
+#include "LowLevelInterpreter.h"
+#include "JSCInlines.h"
+#include "Options.h"
+#include "StackAlignment.h"
+#include "StrongInlines.h"
+#include "UnlinkedCodeBlock.h"
+#include "UnlinkedInstructionStream.h"
+#include <wtf/StdLibExtras.h>
+#include <wtf/text/WTFString.h>
using namespace std;
namespace JSC {
-/*
- The layout of a register frame looks like this:
-
- For
+void Label::setLocation(unsigned location)
+{
+ m_location = location;
+
+ unsigned size = m_unresolvedJumps.size();
+ for (unsigned i = 0; i < size; ++i)
+ m_generator.instructions()[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first;
+}
- function f(x, y) {
- var v1;
- function g() { }
- var v2;
- return (x) * (y);
+ParserError BytecodeGenerator::generate()
+{
+ SamplingRegion samplingRegion("Bytecode Generation");
+
+ m_codeBlock->setThisRegister(m_thisRegister.virtualRegister());
+
+ // If we have declared a variable named "arguments" and we are using arguments then we should
+ // perform that assignment now.
+ if (m_needToInitializeArguments)
+ initializeVariable(variable(propertyNames().arguments), m_argumentsRegister);
+
+ for (size_t i = 0; i < m_destructuringParameters.size(); i++) {
+ auto& entry = m_destructuringParameters[i];
+ entry.second->bindValue(*this, entry.first.get());
}
- assuming (x) and (y) generated temporaries t1 and t2, you would have
-
- ------------------------------------
- | x | y | g | v2 | v1 | t1 | t2 | <-- value held
- ------------------------------------
- | -5 | -4 | -3 | -2 | -1 | +0 | +1 | <-- register index
- ------------------------------------
- | params->|<-locals | temps->
-
- Because temporary registers are allocated in a stack-like fashion, we
- can reclaim them with a simple popping algorithm. The same goes for labels.
- (We never reclaim parameter or local registers, because parameters and
- locals are DontDelete.)
-
- The register layout before a function call looks like this:
-
- For
-
- function f(x, y)
{
+ RefPtr<RegisterID> temp = newTemporary();
+ RefPtr<RegisterID> globalScope = scopeRegister(); // FIXME: With lexical scoping, this won't always be the global object: https://bugs.webkit.org/show_bug.cgi?id=142944
+ for (auto functionPair : m_functionsToInitialize) {
+ FunctionBodyNode* functionBody = functionPair.first;
+ FunctionVariableType functionType = functionPair.second;
+ emitNewFunction(temp.get(), functionBody);
+ if (functionType == NormalFunctionVariable)
+ initializeVariable(variable(functionBody->ident()) , temp.get());
+ else if (functionType == GlobalFunctionVariable)
+ emitPutToScope(globalScope.get(), Variable(functionBody->ident()), temp.get(), ThrowIfNotFound);
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+ }
}
+
+ bool callingClassConstructor = constructorKind() != ConstructorKind::None && !isConstructor();
+ if (!callingClassConstructor)
+ m_scopeNode->emitBytecode(*this);
- f(1);
-
- > <------------------------------
- < > reserved: call frame | 1 | <-- value held
- > >snip< <------------------------------
- < > +0 | +1 | +2 | +3 | +4 | +5 | <-- register index
- > <------------------------------
- | params->|<-locals | temps->
-
- The call instruction fills in the "call frame" registers. It also pads
- missing arguments at the end of the call:
-
- > <-----------------------------------
- < > reserved: call frame | 1 | ? | <-- value held ("?" stands for "undefined")
- > >snip< <-----------------------------------
- < > +0 | +1 | +2 | +3 | +4 | +5 | +6 | <-- register index
- > <-----------------------------------
- | params->|<-locals | temps->
-
- After filling in missing arguments, the call instruction sets up the new
- stack frame to overlap the end of the old stack frame:
-
- |----------------------------------> <
- | reserved: call frame | 1 | ? < > <-- value held ("?" stands for "undefined")
- |----------------------------------> >snip< <
- | -7 | -6 | -5 | -4 | -3 | -2 | -1 < > <-- register index
- |----------------------------------> <
- | | params->|<-locals | temps->
+ m_staticPropertyAnalyzer.kill();
- That way, arguments are "copied" into the callee's stack frame for free.
+ for (unsigned i = 0; i < m_tryRanges.size(); ++i) {
+ TryRange& range = m_tryRanges[i];
+ int start = range.start->bind();
+ int end = range.end->bind();
+
+ // This will happen for empty try blocks and for some cases of finally blocks:
+ //
+ // try {
+ // try {
+ // } finally {
+ // return 42;
+ // // *HERE*
+ // }
+ // } finally {
+ // print("things");
+ // }
+ //
+ // The return will pop scopes to execute the outer finally block. But this includes
+ // popping the try context for the inner try. The try context is live in the fall-through
+ // part of the finally block not because we will emit a handler that overlaps the finally,
+ // but because we haven't yet had a chance to plant the catch target. Then when we finish
+ // emitting code for the outer finally block, we repush the try context, this time with a
+ // new start index. But that means that the start index for the try range corresponding
+ // to the inner-finally-following-the-return (marked as "*HERE*" above) will be greater
+ // than the end index of the try block. This is harmless since end < start handlers will
+ // never get matched in our logic, but we do the runtime a favor and choose to not emit
+ // such handlers at all.
+ if (end <= start)
+ continue;
+
+ ASSERT(range.tryData->targetScopeDepth != UINT_MAX);
+ ASSERT(range.tryData->handlerType != HandlerType::Illegal);
+ UnlinkedHandlerInfo info(static_cast<uint32_t>(start), static_cast<uint32_t>(end),
+ static_cast<uint32_t>(range.tryData->target->bind()), range.tryData->targetScopeDepth,
+ range.tryData->handlerType);
+ m_codeBlock->addExceptionHandler(info);
+ }
+
+ m_codeBlock->setInstructions(std::make_unique<UnlinkedInstructionStream>(m_instructions));
- If the caller supplies too many arguments, this trick doesn't work. The
- extra arguments protrude into space reserved for locals and temporaries.
- In that case, the call instruction makes a real copy of the call frame header,
- along with just the arguments expected by the callee, leaving the original
- call frame header and arguments behind. (The call instruction can't just discard
- extra arguments, because the "arguments" object may access them later.)
- This copying strategy ensures that all named values will be at the indices
- expected by the callee.
-*/
+ m_codeBlock->shrinkToFit();
-#ifndef NDEBUG
-static bool s_dumpsGeneratedCode = false;
-#endif
+ if (m_codeBlock->symbolTable() && !m_codeBlock->vm()->typeProfiler())
+ m_codeBlock->setSymbolTable(m_codeBlock->symbolTable()->cloneScopePart(*m_codeBlock->vm()));
-void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode)
-{
-#ifndef NDEBUG
- s_dumpsGeneratedCode = dumpsGeneratedCode;
-#else
- UNUSED_PARAM(dumpsGeneratedCode);
-#endif
+ if (m_expressionTooDeep)
+ return ParserError(ParserError::OutOfMemory);
+ return ParserError(ParserError::ErrorNone);
}
-bool BytecodeGenerator::dumpsGeneratedCode()
+BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_scopeNode(programNode)
+ , m_codeBlock(vm, codeBlock)
+ , m_thisRegister(CallFrame::thisArgumentOffset())
+ , m_codeType(GlobalCode)
+ , m_vm(&vm)
{
-#ifndef NDEBUG
- return s_dumpsGeneratedCode;
-#else
- return false;
-#endif
-}
+ for (auto& constantRegister : m_linkTimeConstantRegisters)
+ constantRegister = nullptr;
-void BytecodeGenerator::generate()
-{
- m_codeBlock->setThisRegister(m_thisRegister.index());
+ m_codeBlock->setNumParameters(1); // Allocate space for "this"
- m_scopeNode->emitBytecode(*this);
+ emitOpcode(op_enter);
-#ifndef NDEBUG
- m_codeBlock->setInstructionCount(m_codeBlock->instructions().size());
+ allocateAndEmitScope();
- if (s_dumpsGeneratedCode)
- m_codeBlock->dump(m_scopeChain->globalObject()->globalExec());
-#endif
+ const VarStack& varStack = programNode->varStack();
+ const FunctionStack& functionStack = programNode->functionStack();
- if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode)
- symbolTable().clear();
-
- m_codeBlock->setIsNumericCompareFunction(instructions() == m_globalData->numericCompareFunction(m_scopeChain->globalObject()->globalExec()));
+ for (size_t i = 0; i < functionStack.size(); ++i) {
+ FunctionBodyNode* function = functionStack[i];
+ m_functionsToInitialize.append(std::make_pair(function, GlobalFunctionVariable));
+ }
-#if !ENABLE(OPCODE_SAMPLING)
- if (!m_regeneratingForExceptionInfo && (m_codeType == FunctionCode || m_codeType == EvalCode))
- m_codeBlock->clearExceptionInfo();
-#endif
+ for (size_t i = 0; i < varStack.size(); ++i)
+ codeBlock->addVariableDeclaration(varStack[i].first, !!(varStack[i].second & DeclarationStacks::IsConstant));
- m_codeBlock->shrinkToFit();
}
-bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
+BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_symbolTable(codeBlock->symbolTable())
+ , m_scopeNode(functionNode)
+ , m_codeBlock(vm, codeBlock)
+ , m_codeType(FunctionCode)
+ , m_vm(&vm)
+ , m_isBuiltinFunction(codeBlock->isBuiltinFunction())
{
- int index = m_calleeRegisters.size();
- SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
- pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
+ for (auto& constantRegister : m_linkTimeConstantRegisters)
+ constantRegister = nullptr;
- if (!result.second) {
- r0 = &registerFor(result.first->second.getIndex());
- return false;
+ if (m_isBuiltinFunction)
+ m_shouldEmitDebugHooks = false;
+
+ m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
+ Vector<Identifier> boundParameterProperties;
+ FunctionParameters& parameters = *functionNode->parameters();
+ for (size_t i = 0; i < parameters.size(); i++) {
+ auto pattern = parameters.at(i);
+ if (pattern->isBindingNode())
+ continue;
+ pattern->collectBoundIdentifiers(boundParameterProperties);
+ continue;
}
- ++m_codeBlock->m_numVars;
- r0 = newRegister();
- return true;
-}
+ bool shouldCaptureSomeOfTheThings = m_shouldEmitDebugHooks || m_codeBlock->needsFullScopeChain();
+ bool shouldCaptureAllOfTheThings = m_shouldEmitDebugHooks || codeBlock->usesEval();
+ bool needsArguments = functionNode->usesArguments() || codeBlock->usesEval();
+
+ auto captures = [&] (UniquedStringImpl* uid) -> bool {
+ if (shouldCaptureAllOfTheThings)
+ return true;
+ if (!shouldCaptureSomeOfTheThings)
+ return false;
+ if (needsArguments && uid == propertyNames().arguments.impl()) {
+ // Actually, we only need to capture the arguments object when we "need full activation"
+ // because of name scopes. But historically we did it this way, so for now we just preserve
+ // the old behavior.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=143072
+ return true;
+ }
+ return functionNode->captures(uid);
+ };
+ auto varKind = [&] (UniquedStringImpl* uid) -> VarKind {
+ return captures(uid) ? VarKind::Scope : VarKind::Stack;
+ };
-bool BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
-{
- int index = m_nextGlobalIndex;
- SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
- pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
+ emitOpcode(op_enter);
- if (!result.second)
- index = result.first->second.getIndex();
- else {
- --m_nextGlobalIndex;
- m_globals.append(index + m_globalVarStorageOffset);
+ allocateAndEmitScope();
+
+ m_calleeRegister.setIndex(JSStack::Callee);
+
+ if (functionNameIsInScope(functionNode->ident(), functionNode->functionMode())
+ && functionNameScopeIsDynamic(codeBlock->usesEval(), codeBlock->isStrictMode())) {
+ // When we do this, we should make our local scope stack know about the function name symbol
+ // table. Currently this works because bytecode linking creates a phony name scope.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=141885
+ // Also, we could create the scope once per JSFunction instance that needs it. That wouldn't
+ // be any more correct, but it would be more performant.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=141887
+ emitPushFunctionNameScope(m_scopeRegister, functionNode->ident(), &m_calleeRegister, ReadOnly | DontDelete);
}
- r0 = &registerFor(index);
- return result.second;
-}
-
-void BytecodeGenerator::allocateConstants(size_t count)
-{
- m_codeBlock->m_numConstants = count;
- if (!count)
- return;
+ if (shouldCaptureSomeOfTheThings) {
+ m_lexicalEnvironmentRegister = addVar();
+ m_codeBlock->setActivationRegister(m_lexicalEnvironmentRegister->virtualRegister());
+ emitOpcode(op_create_lexical_environment);
+ instructions().append(m_lexicalEnvironmentRegister->index());
+ instructions().append(scopeRegister()->index());
+ emitOpcode(op_mov);
+ instructions().append(scopeRegister()->index());
+ instructions().append(m_lexicalEnvironmentRegister->index());
+ }
- m_nextConstantIndex = m_calleeRegisters.size();
-
- for (size_t i = 0; i < count; ++i)
- newRegister();
- m_lastConstant = &m_calleeRegisters.last();
-}
-
-BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(!!debugger)
- , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
- , m_scopeChain(&scopeChain)
- , m_symbolTable(symbolTable)
- , m_scopeNode(programNode)
- , m_codeBlock(codeBlock)
- , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
- , m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(0)
- , m_codeType(GlobalCode)
- , m_nextGlobalIndex(-1)
- , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
- , m_lastOpcodeID(op_end)
- , m_emitNodeDepth(0)
- , m_regeneratingForExceptionInfo(false)
- , m_codeBlockBeingRegeneratedFrom(0)
-{
- if (m_shouldEmitDebugHooks)
- m_codeBlock->setNeedsFullScopeChain(true);
-
- emitOpcode(op_enter);
- codeBlock->setGlobalData(m_globalData);
-
- // FIXME: Move code that modifies the global object to Interpreter::execute.
+ // Make sure the code block knows about all of our parameters, and make sure that parameters
+ // needing destructuring are noted.
+ m_parameters.grow(parameters.size() + 1); // reserve space for "this"
+ m_thisRegister.setIndex(initializeNextParameter()->index()); // this
+ for (unsigned i = 0; i < parameters.size(); ++i) {
+ auto pattern = parameters.at(i);
+ RegisterID* reg = initializeNextParameter();
+ if (!pattern->isBindingNode())
+ m_destructuringParameters.append(std::make_pair(reg, pattern));
+ }
- m_codeBlock->m_numParameters = 1; // Allocate space for "this"
+ // Figure out some interesting facts about our arguments.
+ bool capturesAnyArgumentByName = false;
+ if (functionNode->hasCapturedVariables()) {
+ FunctionParameters& parameters = *functionNode->parameters();
+ for (size_t i = 0; i < parameters.size(); ++i) {
+ auto pattern = parameters.at(i);
+ if (!pattern->isBindingNode())
+ continue;
+ const Identifier& ident = static_cast<const BindingNode*>(pattern)->boundProperty();
+ capturesAnyArgumentByName |= captures(ident.impl());
+ }
+ }
- JSGlobalObject* globalObject = scopeChain.globalObject();
- ExecState* exec = globalObject->globalExec();
- RegisterFile* registerFile = &exec->globalData().interpreter->registerFile();
+ if (capturesAnyArgumentByName)
+ ASSERT(m_lexicalEnvironmentRegister);
- // Shift register indexes in generated code to elide registers allocated by intermediate stack frames.
- m_globalVarStorageOffset = -RegisterFile::CallFrameHeaderSize - m_codeBlock->m_numParameters - registerFile->size();
-
- // Add previously defined symbols to bookkeeping.
- m_globals.grow(symbolTable->size());
- SymbolTable::iterator end = symbolTable->end();
- for (SymbolTable::iterator it = symbolTable->begin(); it != end; ++it)
- registerFor(it->second.getIndex()).setIndex(it->second.getIndex() + m_globalVarStorageOffset);
+ // Need to know what our functions are called. Parameters have some goofy behaviors when it
+ // comes to functions of the same name.
+ for (FunctionBodyNode* function : functionNode->functionStack())
+ m_functions.add(function->ident().impl());
+
+ if (needsArguments) {
+ // Create the arguments object now. We may put the arguments object into the activation if
+ // it is captured. Either way, we create two arguments object variables: one is our
+ // private variable that is immutable, and another that is the user-visible variable. The
+ // immutable one is only used here, or during formal parameter resolutions if we opt for
+ // DirectArguments.
- BatchedTransitionOptimizer optimizer(globalObject);
-
- const VarStack& varStack = programNode->varStack();
- const FunctionStack& functionStack = programNode->functionStack();
- bool canOptimizeNewGlobals = symbolTable->size() + functionStack.size() + varStack.size() < registerFile->maxGlobals();
- if (canOptimizeNewGlobals) {
- // Shift new symbols so they get stored prior to existing symbols.
- m_nextGlobalIndex -= symbolTable->size();
-
- for (size_t i = 0; i < functionStack.size(); ++i) {
- FuncDeclNode* funcDecl = functionStack[i].get();
- globalObject->removeDirect(funcDecl->m_ident); // Make sure our new function is not shadowed by an old property.
- emitNewFunction(addGlobalVar(funcDecl->m_ident, false), funcDecl);
+ m_argumentsRegister = addVar();
+ m_argumentsRegister->ref();
+ }
+
+ if (needsArguments && !codeBlock->isStrictMode()) {
+ // If we captured any formal parameter by name, then we use ScopedArguments. Otherwise we
+ // use DirectArguments. With ScopedArguments, we lift all of our arguments into the
+ // activation.
+
+ if (capturesAnyArgumentByName) {
+ m_symbolTable->setArgumentsLength(vm, parameters.size());
+
+ // For each parameter, we have two possibilities:
+ // Either it's a binding node with no function overlap, in which case it gets a name
+ // in the symbol table - or it just gets space reserved in the symbol table. Either
+ // way we lift the value into the scope.
+ for (unsigned i = 0; i < parameters.size(); ++i) {
+ ScopeOffset offset = m_symbolTable->takeNextScopeOffset();
+ m_symbolTable->setArgumentOffset(vm, i, offset);
+ if (UniquedStringImpl* name = visibleNameForParameter(parameters.at(i))) {
+ VarOffset varOffset(offset);
+ SymbolTableEntry entry(varOffset);
+ // Stores to these variables via the ScopedArguments object will not do
+ // notifyWrite(), since that would be cumbersome. Also, watching formal
+ // parameters when "arguments" is in play is unlikely to be super profitable.
+ // So, we just disable it.
+ entry.disableWatching();
+ m_symbolTable->set(name, entry);
+ }
+ emitOpcode(op_put_to_scope);
+ instructions().append(m_lexicalEnvironmentRegister->index());
+ instructions().append(UINT_MAX);
+ instructions().append(virtualRegisterForArgument(1 + i).offset());
+ instructions().append(ResolveModeAndType(ThrowIfNotFound, LocalClosureVar).operand());
+ instructions().append(0);
+ instructions().append(offset.offset());
+ }
+
+ // This creates a scoped arguments object and copies the overflow arguments into the
+ // scope. It's the equivalent of calling ScopedArguments::createByCopying().
+ emitOpcode(op_create_scoped_arguments);
+ instructions().append(m_argumentsRegister->index());
+ instructions().append(m_lexicalEnvironmentRegister->index());
+ } else {
+ // We're going to put all parameters into the DirectArguments object. First ensure
+ // that the symbol table knows that this is happening.
+ for (unsigned i = 0; i < parameters.size(); ++i) {
+ if (UniquedStringImpl* name = visibleNameForParameter(parameters.at(i)))
+ m_symbolTable->set(name, SymbolTableEntry(VarOffset(DirectArgumentsOffset(i))));
+ }
+
+ emitOpcode(op_create_direct_arguments);
+ instructions().append(m_argumentsRegister->index());
}
-
- Vector<RegisterID*, 32> newVars;
- for (size_t i = 0; i < varStack.size(); ++i)
- if (!globalObject->hasProperty(exec, varStack[i].first))
- newVars.append(addGlobalVar(varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant));
-
- allocateConstants(programNode->neededConstants());
-
- for (size_t i = 0; i < newVars.size(); ++i)
- emitLoad(newVars[i], jsUndefined());
} else {
- for (size_t i = 0; i < functionStack.size(); ++i) {
- FuncDeclNode* funcDecl = functionStack[i].get();
- globalObject->putWithAttributes(exec, funcDecl->m_ident, funcDecl->makeFunction(exec, scopeChain.node()), DontDelete);
- }
- for (size_t i = 0; i < varStack.size(); ++i) {
- if (globalObject->hasProperty(exec, varStack[i].first))
+ // Create the formal parameters the normal way. Any of them could be captured, or not. If
+ // captured, lift them into the scope.
+ for (unsigned i = 0; i < parameters.size(); ++i) {
+ UniquedStringImpl* name = visibleNameForParameter(parameters.at(i));
+ if (!name)
+ continue;
+
+ if (!captures(name)) {
+ // This is the easy case - just tell the symbol table about the argument. It will
+ // be accessed directly.
+ m_symbolTable->set(name, SymbolTableEntry(VarOffset(virtualRegisterForArgument(1 + i))));
continue;
- int attributes = DontDelete;
- if (varStack[i].second & DeclarationStacks::IsConstant)
- attributes |= ReadOnly;
- globalObject->putWithAttributes(exec, varStack[i].first, jsUndefined(), attributes);
+ }
+
+ ScopeOffset offset = m_symbolTable->takeNextScopeOffset();
+ const Identifier& ident =
+ static_cast<const BindingNode*>(parameters.at(i))->boundProperty();
+ m_symbolTable->set(name, SymbolTableEntry(VarOffset(offset)));
+
+ emitOpcode(op_put_to_scope);
+ instructions().append(m_lexicalEnvironmentRegister->index());
+ instructions().append(addConstant(ident));
+ instructions().append(virtualRegisterForArgument(1 + i).offset());
+ instructions().append(ResolveModeAndType(ThrowIfNotFound, LocalClosureVar).operand());
+ instructions().append(0);
+ instructions().append(offset.offset());
}
-
- allocateConstants(programNode->neededConstants());
}
-}
-
-BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(!!debugger)
- , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
- , m_scopeChain(&scopeChain)
- , m_symbolTable(symbolTable)
- , m_scopeNode(functionBody)
- , m_codeBlock(codeBlock)
- , m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(0)
- , m_codeType(FunctionCode)
- , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
- , m_lastOpcodeID(op_end)
- , m_emitNodeDepth(0)
- , m_regeneratingForExceptionInfo(false)
- , m_codeBlockBeingRegeneratedFrom(0)
-{
- if (m_shouldEmitDebugHooks)
- m_codeBlock->setNeedsFullScopeChain(true);
-
- codeBlock->setGlobalData(m_globalData);
-
- bool usesArguments = functionBody->usesArguments();
- codeBlock->setUsesArguments(usesArguments);
- if (usesArguments) {
- m_argumentsRegister.setIndex(RegisterFile::OptionalCalleeArguments);
- addVar(propertyNames().arguments, false);
+
+ if (needsArguments && codeBlock->isStrictMode()) {
+ // Allocate an out-of-bands arguments object.
+ emitOpcode(op_create_out_of_band_arguments);
+ instructions().append(m_argumentsRegister->index());
}
-
- if (m_codeBlock->needsFullScopeChain()) {
- ++m_codeBlock->m_numVars;
- m_activationRegisterIndex = newRegister()->index();
- emitOpcode(op_enter_with_activation);
- instructions().append(m_activationRegisterIndex);
- } else
- emitOpcode(op_enter);
-
- if (usesArguments)
- emitOpcode(op_create_arguments);
-
- const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack();
- for (size_t i = 0; i < functionStack.size(); ++i) {
- FuncDeclNode* funcDecl = functionStack[i].get();
- const Identifier& ident = funcDecl->m_ident;
- m_functions.add(ident.ustring().rep());
- emitNewFunction(addVar(ident, false), funcDecl);
+
+ // Now declare all variables.
+ for (const Identifier& ident : boundParameterProperties)
+ createVariable(ident, varKind(ident.impl()), IsVariable);
+ for (FunctionBodyNode* function : functionNode->functionStack()) {
+ const Identifier& ident = function->ident();
+ createVariable(ident, varKind(ident.impl()), IsVariable);
+ m_functionsToInitialize.append(std::make_pair(function, NormalFunctionVariable));
}
-
- const DeclarationStacks::VarStack& varStack = functionBody->varStack();
- for (size_t i = 0; i < varStack.size(); ++i)
- addVar(varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant);
-
- const Identifier* parameters = functionBody->parameters();
- size_t parameterCount = functionBody->parameterCount();
- m_nextParameterIndex = -RegisterFile::CallFrameHeaderSize - parameterCount - 1;
- m_parameters.grow(1 + parameterCount); // reserve space for "this"
-
- // Add "this" as a parameter
- m_thisRegister.setIndex(m_nextParameterIndex);
- ++m_nextParameterIndex;
- ++m_codeBlock->m_numParameters;
-
- if (functionBody->usesThis() || m_shouldEmitDebugHooks) {
- emitOpcode(op_convert_this);
- instructions().append(m_thisRegister.index());
+ for (auto& entry : functionNode->varStack()) {
+ ConstantMode constantMode = modeForIsConstant(entry.second & DeclarationStacks::IsConstant);
+ // Variables named "arguments" are never const.
+ if (entry.first == propertyNames().arguments)
+ constantMode = IsVariable;
+ createVariable(entry.first, varKind(entry.first.impl()), constantMode, IgnoreExisting);
}
- for (size_t i = 0; i < parameterCount; ++i)
- addParameter(parameters[i]);
-
- allocateConstants(functionBody->neededConstants());
+ // There are some variables that need to be preinitialized to something other than Undefined:
+ //
+ // - "arguments": unless it's used as a function or parameter, this should refer to the
+ // arguments object.
+ //
+ // - callee: unless it's used as a var, function, or parameter, this should refer to the
+ // callee (i.e. our function).
+ //
+ // - functions: these always override everything else.
+ //
+ // The most logical way to do all of this is to initialize none of the variables until now,
+ // and then initialize them in BytecodeGenerator::generate() in such an order that the rules
+ // for how these things override each other end up holding. We would initialize the callee
+ // first, then "arguments", then all arguments, then the functions.
+ //
+ // But some arguments are already initialized by default, since if they aren't captured and we
+ // don't have "arguments" then we just point the symbol table at the stack slot of those
+ // arguments. We end up initializing the rest of the arguments that have an uncomplicated
+ // binding (i.e. don't involve destructuring) above when figuring out how to lay them out,
+ // because that's just the simplest thing. This means that when we initialize them, we have to
+ // watch out for the things that override arguments (namely, functions).
+ //
+ // We also initialize callee here as well, just because it's so weird. We know whether we want
+ // to do this because we can just check if it's in the symbol table.
+ if (functionNameIsInScope(functionNode->ident(), functionNode->functionMode())
+ && !functionNameScopeIsDynamic(codeBlock->usesEval(), codeBlock->isStrictMode())
+ && m_symbolTable->get(functionNode->ident().impl()).isNull()) {
+ if (captures(functionNode->ident().impl())) {
+ ScopeOffset offset;
+ {
+ ConcurrentJITLocker locker(m_symbolTable->m_lock);
+ offset = m_symbolTable->takeNextScopeOffset(locker);
+ m_symbolTable->add(
+ locker, functionNode->ident().impl(),
+ SymbolTableEntry(VarOffset(offset), ReadOnly));
+ }
+
+ emitOpcode(op_put_to_scope);
+ instructions().append(m_lexicalEnvironmentRegister->index());
+ instructions().append(addConstant(functionNode->ident()));
+ instructions().append(m_calleeRegister.index());
+ instructions().append(ResolveModeAndType(ThrowIfNotFound, LocalClosureVar).operand());
+ instructions().append(0);
+ instructions().append(offset.offset());
+ } else {
+ m_symbolTable->add(
+ functionNode->ident().impl(),
+ SymbolTableEntry(VarOffset(m_calleeRegister.virtualRegister()), ReadOnly));
+ }
+ }
+
+ // This is our final act of weirdness. "arguments" is overridden by everything except the
+ // callee. We add it to the symbol table if it's not already there and it's not an argument.
+ if (needsArguments) {
+ // If "arguments" is overridden by a function or destructuring parameter name, then it's
+ // OK for us to call createVariable() because it won't change anything. It's also OK for
+ // us to then tell BytecodeGenerator::generate() to write to it because it will do so
+ // before it initializes functions and destructuring parameters. But if "arguments" is
+ // overridden by a "simple" function parameter, then we have to bail: createVariable()
+ // would assert and BytecodeGenerator::generate() would write the "arguments" after the
+ // argument value had already been properly initialized.
+
+ bool haveParameterNamedArguments = false;
+ for (unsigned i = 0; i < parameters.size(); ++i) {
+ UniquedStringImpl* name = visibleNameForParameter(parameters.at(i));
+ if (name == propertyNames().arguments.impl()) {
+ haveParameterNamedArguments = true;
+ break;
+ }
+ }
+
+ if (!haveParameterNamedArguments) {
+ createVariable(
+ propertyNames().arguments, varKind(propertyNames().arguments.impl()), IsVariable);
+ m_needToInitializeArguments = true;
+ }
+ }
+
+ if (isConstructor()) {
+ if (constructorKind() == ConstructorKind::Derived) {
+ m_newTargetRegister = addVar();
+ emitMove(m_newTargetRegister, &m_thisRegister);
+ emitMoveEmptyValue(&m_thisRegister);
+ } else
+ emitCreateThis(&m_thisRegister);
+ } else if (constructorKind() != ConstructorKind::None) {
+ emitThrowTypeError("Cannot call a class constructor");
+ } else if (functionNode->usesThis() || codeBlock->usesEval()) {
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+ emitOpcode(op_to_this);
+ instructions().append(kill(&m_thisRegister));
+ instructions().append(0);
+ instructions().append(0);
+ }
}
-BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(!!debugger)
- , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
- , m_scopeChain(&scopeChain)
- , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_symbolTable(codeBlock->symbolTable())
, m_scopeNode(evalNode)
- , m_codeBlock(codeBlock)
- , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
- , m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(codeBlock->baseScopeDepth())
+ , m_codeBlock(vm, codeBlock)
+ , m_thisRegister(CallFrame::thisArgumentOffset())
, m_codeType(EvalCode)
- , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
- , m_lastOpcodeID(op_end)
- , m_emitNodeDepth(0)
- , m_regeneratingForExceptionInfo(false)
- , m_codeBlockBeingRegeneratedFrom(0)
-{
- if (m_shouldEmitDebugHooks || m_baseScopeDepth)
- m_codeBlock->setNeedsFullScopeChain(true);
-
- emitOpcode(op_enter);
- codeBlock->setGlobalData(m_globalData);
- m_codeBlock->m_numParameters = 1; // Allocate space for "this"
-
- allocateConstants(evalNode->neededConstants());
-}
-
-RegisterID* BytecodeGenerator::addParameter(const Identifier& ident)
+ , m_vm(&vm)
{
- // Parameters overwrite var declarations, but not function declarations.
- RegisterID* result = 0;
- UString::Rep* rep = ident.ustring().rep();
- if (!m_functions.contains(rep)) {
- symbolTable().set(rep, m_nextParameterIndex);
- RegisterID& parameter = registerFor(m_nextParameterIndex);
- parameter.setIndex(m_nextParameterIndex);
- result = &parameter;
- }
+ for (auto& constantRegister : m_linkTimeConstantRegisters)
+ constantRegister = nullptr;
- // To maintain the calling convention, we have to allocate unique space for
- // each parameter, even if the parameter doesn't make it into the symbol table.
- ++m_nextParameterIndex;
- ++m_codeBlock->m_numParameters;
- return result;
-}
+ m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
+ m_codeBlock->setNumParameters(1);
-RegisterID* BytecodeGenerator::registerFor(const Identifier& ident)
-{
- if (ident == propertyNames().thisIdentifier)
- return &m_thisRegister;
+ emitOpcode(op_enter);
- if (!shouldOptimizeLocals())
- return 0;
+ allocateAndEmitScope();
- SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
- if (entry.isNull())
- return 0;
+ const DeclarationStacks::FunctionStack& functionStack = evalNode->functionStack();
+ for (size_t i = 0; i < functionStack.size(); ++i)
+ m_codeBlock->addFunctionDecl(makeFunction(functionStack[i]));
- return &registerFor(entry.getIndex());
+ const DeclarationStacks::VarStack& varStack = evalNode->varStack();
+ unsigned numVariables = varStack.size();
+ Vector<Identifier, 0, UnsafeVectorOverflow> variables;
+ variables.reserveCapacity(numVariables);
+ for (size_t i = 0; i < numVariables; ++i) {
+ ASSERT(varStack[i].first.impl()->isAtomic() || varStack[i].first.impl()->isSymbol());
+ variables.append(varStack[i].first);
+ }
+ codeBlock->adoptVariables(variables);
}
-RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
+BytecodeGenerator::~BytecodeGenerator()
{
- if (m_codeType == EvalCode)
- return 0;
-
- SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
- ASSERT(!entry.isNull());
-
- return &registerFor(entry.getIndex());
}
-bool BytecodeGenerator::isLocal(const Identifier& ident)
+RegisterID* BytecodeGenerator::initializeNextParameter()
{
- if (ident == propertyNames().thisIdentifier)
- return true;
-
- return shouldOptimizeLocals() && symbolTable().contains(ident.ustring().rep());
+ VirtualRegister reg = virtualRegisterForArgument(m_codeBlock->numParameters());
+ RegisterID& parameter = registerFor(reg);
+ parameter.setIndex(reg.offset());
+ m_codeBlock->addParameter();
+ return &parameter;
}
-bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
+UniquedStringImpl* BytecodeGenerator::visibleNameForParameter(DestructuringPatternNode* pattern)
{
- return symbolTable().get(ident.ustring().rep()).isReadOnly();
+ if (pattern->isBindingNode()) {
+ const Identifier& ident = static_cast<const BindingNode*>(pattern)->boundProperty();
+ if (!m_functions.contains(ident.impl()))
+ return ident.impl();
+ }
+ return nullptr;
}
RegisterID* BytecodeGenerator::newRegister()
{
- m_calleeRegisters.append(m_calleeRegisters.size());
- m_codeBlock->m_numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+ m_calleeRegisters.append(virtualRegisterForLocal(m_calleeRegisters.size()));
+ int numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+ numCalleeRegisters = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numCalleeRegisters);
+ m_codeBlock->m_numCalleeRegisters = numCalleeRegisters;
return &m_calleeRegisters.last();
}
return result;
}
-RegisterID* BytecodeGenerator::highestUsedRegister()
-{
- size_t count = m_codeBlock->m_numCalleeRegisters;
- while (m_calleeRegisters.size() < count)
- newRegister();
- return &m_calleeRegisters.last();
-}
-
-PassRefPtr<LabelScope> BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
+LabelScopePtr BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
{
// Reclaim free label scopes.
while (m_labelScopes.size() && !m_labelScopes.last().refCount())
m_labelScopes.removeLast();
// Allocate new label scope.
- LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : 0); // Only loops have continue targets.
+ LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : PassRefPtr<Label>()); // Only loops have continue targets.
m_labelScopes.append(scope);
- return &m_labelScopes.last();
+ return LabelScopePtr(m_labelScopes, m_labelScopes.size() - 1);
}
PassRefPtr<Label> BytecodeGenerator::newLabel()
m_labels.removeLast();
// Allocate new label ID.
- m_labels.append(m_codeBlock);
+ m_labels.append(*this);
return &m_labels.last();
}
void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)
{
- instructions().append(globalData()->interpreter->getOpcode(opcodeID));
+#ifndef NDEBUG
+ size_t opcodePosition = instructions().size();
+ ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end);
+ m_lastOpcodePosition = opcodePosition;
+#endif
+ instructions().append(opcodeID);
m_lastOpcodeID = opcodeID;
}
+UnlinkedArrayProfile BytecodeGenerator::newArrayProfile()
+{
+ return m_codeBlock->addArrayProfile();
+}
+
+UnlinkedArrayAllocationProfile BytecodeGenerator::newArrayAllocationProfile()
+{
+ return m_codeBlock->addArrayAllocationProfile();
+}
+
+UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile()
+{
+ return m_codeBlock->addObjectAllocationProfile();
+}
+
+UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)
+{
+ UnlinkedValueProfile result = m_codeBlock->addValueProfile();
+ emitOpcode(opcodeID);
+ return result;
+}
+
+void BytecodeGenerator::emitLoopHint()
+{
+ emitOpcode(op_loop_hint);
+}
+
void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index)
{
ASSERT(instructions().size() >= 4);
{
ASSERT(instructions().size() >= 4);
instructions().shrink(instructions().size() - 4);
+ m_lastOpcodeID = op_end;
}
void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp()
{
ASSERT(instructions().size() >= 3);
instructions().shrink(instructions().size() - 3);
+ m_lastOpcodeID = op_end;
}
PassRefPtr<Label> BytecodeGenerator::emitJump(Label* target)
{
- emitOpcode(target->isForward() ? op_jmp : op_loop);
- instructions().append(target->offsetFrom(instructions().size()));
+ size_t begin = instructions().size();
+ emitOpcode(op_jmp);
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
PassRefPtr<Label> BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label* target)
{
- if (m_lastOpcodeID == op_less && !target->isForward()) {
+ if (m_lastOpcodeID == op_less) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jless);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_lesseq) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jlesseq);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_greater) {
int dstIndex;
int src1Index;
int src2Index;
if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
rewindBinaryOp();
- emitOpcode(op_loop_if_less);
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jgreater);
instructions().append(src1Index);
instructions().append(src2Index);
- instructions().append(target->offsetFrom(instructions().size()));
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
- } else if (m_lastOpcodeID == op_lesseq && !target->isForward()) {
+ } else if (m_lastOpcodeID == op_greatereq) {
int dstIndex;
int src1Index;
int src2Index;
if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
rewindBinaryOp();
- emitOpcode(op_loop_if_lesseq);
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jgreatereq);
instructions().append(src1Index);
instructions().append(src2Index);
- instructions().append(target->offsetFrom(instructions().size()));
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
} else if (m_lastOpcodeID == op_eq_null && target->isForward()) {
if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
rewindUnaryOp();
+
+ size_t begin = instructions().size();
emitOpcode(op_jeq_null);
instructions().append(srcIndex);
- instructions().append(target->offsetFrom(instructions().size()));
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
} else if (m_lastOpcodeID == op_neq_null && target->isForward()) {
if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
rewindUnaryOp();
+
+ size_t begin = instructions().size();
emitOpcode(op_jneq_null);
instructions().append(srcIndex);
- instructions().append(target->offsetFrom(instructions().size()));
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
}
- emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
+ size_t begin = instructions().size();
+
+ emitOpcode(op_jtrue);
instructions().append(cond->index());
- instructions().append(target->offsetFrom(instructions().size()));
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
PassRefPtr<Label> BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label* target)
{
- ASSERT(target->isForward());
-
- if (m_lastOpcodeID == op_less) {
+ if (m_lastOpcodeID == op_less && target->isForward()) {
int dstIndex;
int src1Index;
int src2Index;
if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
rewindBinaryOp();
+
+ size_t begin = instructions().size();
emitOpcode(op_jnless);
instructions().append(src1Index);
instructions().append(src2Index);
- instructions().append(target->offsetFrom(instructions().size()));
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
- } else if (m_lastOpcodeID == op_not) {
+ } else if (m_lastOpcodeID == op_lesseq && target->isForward()) {
int dstIndex;
- int srcIndex;
+ int src1Index;
+ int src2Index;
- retrieveLastUnaryOp(dstIndex, srcIndex);
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
- rewindUnaryOp();
- emitOpcode(op_jtrue);
- instructions().append(srcIndex);
- instructions().append(target->offsetFrom(instructions().size()));
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jnlesseq);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
- } else if (m_lastOpcodeID == op_eq_null) {
+ } else if (m_lastOpcodeID == op_greater && target->isForward()) {
int dstIndex;
- int srcIndex;
+ int src1Index;
+ int src2Index;
- retrieveLastUnaryOp(dstIndex, srcIndex);
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
- rewindUnaryOp();
- emitOpcode(op_jneq_null);
- instructions().append(srcIndex);
- instructions().append(target->offsetFrom(instructions().size()));
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jngreater);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_greatereq && target->isForward()) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jngreatereq);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
- } else if (m_lastOpcodeID == op_neq_null) {
+ } else if (m_lastOpcodeID == op_not) {
int dstIndex;
int srcIndex;
if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
rewindUnaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jtrue);
+ instructions().append(srcIndex);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_eq_null && target->isForward()) {
+ int dstIndex;
+ int srcIndex;
+
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindUnaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jneq_null);
+ instructions().append(srcIndex);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_neq_null && target->isForward()) {
+ int dstIndex;
+ int srcIndex;
+
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindUnaryOp();
+
+ size_t begin = instructions().size();
emitOpcode(op_jeq_null);
instructions().append(srcIndex);
- instructions().append(target->offsetFrom(instructions().size()));
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
}
+ size_t begin = instructions().size();
emitOpcode(op_jfalse);
instructions().append(cond->index());
- instructions().append(target->offsetFrom(instructions().size()));
+ instructions().append(target->bind(begin, instructions().size()));
return target;
}
-unsigned BytecodeGenerator::addConstant(FuncDeclNode* n)
+PassRefPtr<Label> BytecodeGenerator::emitJumpIfNotFunctionCall(RegisterID* cond, Label* target)
{
- // No need to explicitly unique function body nodes -- they're unique already.
- return m_codeBlock->addFunction(n);
+ size_t begin = instructions().size();
+
+ emitOpcode(op_jneq_ptr);
+ instructions().append(cond->index());
+ instructions().append(Special::CallFunction);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
}
-unsigned BytecodeGenerator::addConstant(FuncExprNode* n)
+PassRefPtr<Label> BytecodeGenerator::emitJumpIfNotFunctionApply(RegisterID* cond, Label* target)
{
- // No need to explicitly unique function expression nodes -- they're unique already.
- return m_codeBlock->addFunctionExpression(n);
+ size_t begin = instructions().size();
+
+ emitOpcode(op_jneq_ptr);
+ instructions().append(cond->index());
+ instructions().append(Special::ApplyFunction);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+}
+
+bool BytecodeGenerator::hasConstant(const Identifier& ident) const
+{
+ UniquedStringImpl* rep = ident.impl();
+ return m_identifierMap.contains(rep);
}
unsigned BytecodeGenerator::addConstant(const Identifier& ident)
{
- UString::Rep* rep = ident.ustring().rep();
- pair<IdentifierMap::iterator, bool> result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
- if (result.second) // new entry
- m_codeBlock->addIdentifier(Identifier(m_globalData, rep));
+ UniquedStringImpl* rep = ident.impl();
+ IdentifierMap::AddResult result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
+ if (result.isNewEntry)
+ m_codeBlock->addIdentifier(ident);
- return result.first->second;
+ return result.iterator->value;
}
-RegisterID* BytecodeGenerator::addConstant(JSValuePtr v)
+// We can't hash JSValue(), so we use a dedicated data member to cache it.
+RegisterID* BytecodeGenerator::addConstantEmptyValue()
{
- pair<JSValueMap::iterator, bool> result = m_jsValueMap.add(JSValuePtr::encode(v), m_nextConstantIndex);
- if (result.second) {
- RegisterID& constant = m_calleeRegisters[m_nextConstantIndex];
-
- ++m_nextConstantIndex;
-
- m_codeBlock->addConstantRegister(JSValuePtr(v));
- return &constant;
+ if (!m_emptyValueRegister) {
+ int index = m_nextConstantOffset;
+ m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+ ++m_nextConstantOffset;
+ m_codeBlock->addConstant(JSValue());
+ m_emptyValueRegister = &m_constantPoolRegisters[index];
}
- return ®isterFor(result.first->second);
+ return m_emptyValueRegister;
}
-unsigned BytecodeGenerator::addUnexpectedConstant(JSValuePtr v)
+RegisterID* BytecodeGenerator::addConstantValue(JSValue v, SourceCodeRepresentation sourceCodeRepresentation)
{
- return m_codeBlock->addUnexpectedConstant(v);
+ if (!v)
+ return addConstantEmptyValue();
+
+ int index = m_nextConstantOffset;
+
+ EncodedJSValueWithRepresentation valueMapKey { JSValue::encode(v), sourceCodeRepresentation };
+ JSValueMap::AddResult result = m_jsValueMap.add(valueMapKey, m_nextConstantOffset);
+ if (result.isNewEntry) {
+ m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+ ++m_nextConstantOffset;
+ m_codeBlock->addConstant(v, sourceCodeRepresentation);
+ } else
+ index = result.iterator->value;
+ return &m_constantPoolRegisters[index];
+}
+
+RegisterID* BytecodeGenerator::emitMoveLinkTimeConstant(RegisterID* dst, LinkTimeConstant type)
+{
+ unsigned constantIndex = static_cast<unsigned>(type);
+ if (!m_linkTimeConstantRegisters[constantIndex]) {
+ int index = m_nextConstantOffset;
+ m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+ ++m_nextConstantOffset;
+ m_codeBlock->addConstant(type);
+ m_linkTimeConstantRegisters[constantIndex] = &m_constantPoolRegisters[index];
+ }
+
+ emitOpcode(op_mov);
+ instructions().append(dst->index());
+ instructions().append(m_linkTimeConstantRegisters[constantIndex]->index());
+
+ return dst;
}
unsigned BytecodeGenerator::addRegExp(RegExp* r)
return m_codeBlock->addRegExp(r);
}
+RegisterID* BytecodeGenerator::emitMoveEmptyValue(RegisterID* dst)
+{
+ RefPtr<RegisterID> emptyValue = addConstantEmptyValue();
+
+ emitOpcode(op_mov);
+ instructions().append(dst->index());
+ instructions().append(emptyValue->index());
+ return dst;
+}
+
RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
{
+ ASSERT(src != m_emptyValueRegister);
+
+ m_staticPropertyAnalyzer.mov(dst->index(), src->index());
emitOpcode(op_mov);
instructions().append(dst->index());
instructions().append(src->index());
+
+ if (!dst->isTemporary() && vm()->typeProfiler())
+ emitProfileType(dst, ProfileTypeBytecodeHasGlobalID, nullptr);
+
return dst;
}
return dst;
}
-RegisterID* BytecodeGenerator::emitPreInc(RegisterID* srcDst)
+RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst)
{
- emitOpcode(op_pre_inc);
+ emitOpcode(op_inc);
instructions().append(srcDst->index());
return srcDst;
}
-RegisterID* BytecodeGenerator::emitPreDec(RegisterID* srcDst)
+RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst)
{
- emitOpcode(op_pre_dec);
+ emitOpcode(op_dec);
instructions().append(srcDst->index());
return srcDst;
}
-RegisterID* BytecodeGenerator::emitPostInc(RegisterID* dst, RegisterID* srcDst)
-{
- emitOpcode(op_post_inc);
- instructions().append(dst->index());
- instructions().append(srcDst->index());
- return dst;
-}
-
-RegisterID* BytecodeGenerator::emitPostDec(RegisterID* dst, RegisterID* srcDst)
-{
- emitOpcode(op_post_dec);
- instructions().append(dst->index());
- instructions().append(srcDst->index());
- return dst;
-}
-
RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types)
{
emitOpcode(opcodeID);
instructions().append(src2->index());
if (opcodeID == op_bitor || opcodeID == op_bitand || opcodeID == op_bitxor ||
- opcodeID == op_add || opcodeID == op_mul || opcodeID == op_sub) {
+ opcodeID == op_add || opcodeID == op_mul || opcodeID == op_sub || opcodeID == op_div)
instructions().append(types.toInt());
- }
return dst;
}
if (src1->index() == dstIndex
&& src1->isTemporary()
&& m_codeBlock->isConstantRegisterIndex(src2->index())
- && m_codeBlock->constantRegister(src2->index() - m_codeBlock->m_numVars).jsValue(m_scopeChain->globalObject()->globalExec()).isString()) {
- const UString& value = asString(m_codeBlock->constantRegister(src2->index() - m_codeBlock->m_numVars).jsValue(m_scopeChain->globalObject()->globalExec()))->value();
+ && m_codeBlock->constantRegister(src2->index()).get().isString()) {
+ const String& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue();
if (value == "undefined") {
rewindUnaryOp();
emitOpcode(op_is_undefined);
}
if (value == "object") {
rewindUnaryOp();
- emitOpcode(op_is_object);
+ emitOpcode(op_is_object_or_null);
instructions().append(dst->index());
instructions().append(srcIndex);
return dst;
return dst;
}
-RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, bool b)
+void BytecodeGenerator::emitTypeProfilerExpressionInfo(const JSTextPosition& startDivot, const JSTextPosition& endDivot)
{
- return emitLoad(dst, jsBoolean(b));
+ unsigned start = startDivot.offset; // Ranges are inclusive of their endpoints, AND 0 indexed.
+ unsigned end = endDivot.offset - 1; // End Ranges already go one past the inclusive range, so subtract 1.
+ unsigned instructionOffset = instructions().size() - 1;
+ m_codeBlock->addTypeProfilerExpressionInfo(instructionOffset, start, end);
}
-RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, double number)
+void BytecodeGenerator::emitProfileType(RegisterID* registerToProfile, ProfileTypeBytecodeFlag flag, const Identifier* identifier)
{
- // FIXME: Our hash tables won't hold infinity, so we make a new JSNumberCell each time.
- // Later we can do the extra work to handle that like the other cases.
- if (number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
- return emitLoad(dst, jsNumber(globalData(), number));
- JSValuePtr& valueInMap = m_numberMap.add(number, noValue()).first->second;
- if (!valueInMap)
- valueInMap = jsNumber(globalData(), number);
- return emitLoad(dst, valueInMap);
+ if (flag == ProfileTypeBytecodeGetFromScope || flag == ProfileTypeBytecodePutToScope)
+ RELEASE_ASSERT(identifier);
+
+ // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
+ emitOpcode(op_profile_type);
+ instructions().append(registerToProfile->index());
+ instructions().append(0);
+ instructions().append(flag);
+ instructions().append(identifier ? addConstant(*identifier) : 0);
+ instructions().append(resolveType());
+}
+
+void BytecodeGenerator::emitProfileControlFlow(int textOffset)
+{
+ if (vm()->controlFlowProfiler()) {
+ RELEASE_ASSERT(textOffset >= 0);
+ size_t bytecodeOffset = instructions().size();
+ m_codeBlock->addOpProfileControlFlowBytecodeOffset(bytecodeOffset);
+
+ emitOpcode(op_profile_control_flow);
+ instructions().append(textOffset);
+ }
+}
+
+RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, bool b)
+{
+ return emitLoad(dst, jsBoolean(b));
}
RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, const Identifier& identifier)
{
- JSString*& stringInMap = m_stringMap.add(identifier.ustring().rep(), 0).first->second;
+ JSString*& stringInMap = m_stringMap.add(identifier.impl(), nullptr).iterator->value;
if (!stringInMap)
- stringInMap = jsOwnedString(globalData(), identifier.ustring());
- return emitLoad(dst, JSValuePtr(stringInMap));
+ stringInMap = jsOwnedString(vm(), identifier.string());
+ return emitLoad(dst, JSValue(stringInMap));
}
-RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, JSValuePtr v)
+RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, JSValue v, SourceCodeRepresentation sourceCodeRepresentation)
{
- RegisterID* constantID = addConstant(v);
+ RegisterID* constantID = addConstantValue(v, sourceCodeRepresentation);
if (dst)
return emitMove(dst, constantID);
return constantID;
}
-RegisterID* BytecodeGenerator::emitUnexpectedLoad(RegisterID* dst, bool b)
+RegisterID* BytecodeGenerator::emitLoadGlobalObject(RegisterID* dst)
{
- emitOpcode(op_unexpected_load);
- instructions().append(dst->index());
- instructions().append(addUnexpectedConstant(jsBoolean(b)));
- return dst;
+ if (!m_globalObjectRegister) {
+ int index = m_nextConstantOffset;
+ m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+ ++m_nextConstantOffset;
+ m_codeBlock->addConstant(JSValue());
+ m_globalObjectRegister = &m_constantPoolRegisters[index];
+ m_codeBlock->setGlobalObjectRegister(VirtualRegister(index));
+ }
+ if (dst)
+ emitMove(dst, m_globalObjectRegister);
+ return m_globalObjectRegister;
}
-RegisterID* BytecodeGenerator::emitUnexpectedLoad(RegisterID* dst, double d)
+Variable BytecodeGenerator::variable(const Identifier& property)
{
- emitOpcode(op_unexpected_load);
- instructions().append(dst->index());
- instructions().append(addUnexpectedConstant(jsNumber(globalData(), d)));
- return dst;
+ if (property == propertyNames().thisIdentifier) {
+ return Variable(
+ property, VarOffset(thisRegister()->virtualRegister()), thisRegister(),
+ ReadOnly, Variable::SpecialVariable);
+ }
+
+ if (!shouldOptimizeLocals())
+ return Variable(property);
+
+ SymbolTableEntry entry = symbolTable().get(property.impl());
+ if (entry.isNull())
+ return Variable(property);
+
+ if (entry.varOffset().isScope() && m_localScopeDepth) {
+ // FIXME: We should be able to statically resolve through our local scopes.
+ // https://bugs.webkit.org/show_bug.cgi?id=141885
+ return Variable(property);
+ }
+
+ return variableForLocalEntry(property, entry);
}
-bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& index, size_t& stackDepth, bool forWriting, JSObject*& globalObject)
+Variable BytecodeGenerator::variablePerSymbolTable(const Identifier& property)
{
- // Cases where we cannot statically optimize the lookup.
- if (property == propertyNames().arguments || !canOptimizeNonLocals()) {
- stackDepth = 0;
- index = missingSymbolMarker();
+ SymbolTableEntry entry = symbolTable().get(property.impl());
+ if (entry.isNull())
+ return Variable(property);
+
+ return variableForLocalEntry(property, entry);
+}
- if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
- ScopeChainIterator iter = m_scopeChain->begin();
- globalObject = *iter;
- ASSERT((++iter) == m_scopeChain->end());
- }
- return false;
- }
+Variable BytecodeGenerator::variableForLocalEntry(
+ const Identifier& property, const SymbolTableEntry& entry)
+{
+ VarOffset offset = entry.varOffset();
+
+ RegisterID* local;
+ if (offset.isStack())
+ local = &registerFor(offset.stackOffset());
+ else
+ local = nullptr;
+
+ return Variable(property, offset, local, entry.getAttributes(), Variable::NormalVariable);
+}
- size_t depth = 0;
+void BytecodeGenerator::createVariable(
+ const Identifier& property, VarKind varKind, ConstantMode constantMode,
+ ExistingVariableMode existingVariableMode)
+{
+ ASSERT(property != propertyNames().thisIdentifier);
- ScopeChainIterator iter = m_scopeChain->begin();
- ScopeChainIterator end = m_scopeChain->end();
- for (; iter != end; ++iter, ++depth) {
- JSObject* currentScope = *iter;
- if (!currentScope->isVariableObject())
- break;
- JSVariableObject* currentVariableObject = static_cast<JSVariableObject*>(currentScope);
- SymbolTableEntry entry = currentVariableObject->symbolTable().get(property.ustring().rep());
-
- // Found the property
- if (!entry.isNull()) {
- if (entry.isReadOnly() && forWriting) {
- stackDepth = 0;
- index = missingSymbolMarker();
- if (++iter == end)
- globalObject = currentVariableObject;
- return false;
- }
- stackDepth = depth;
- index = entry.getIndex();
- if (++iter == end)
- globalObject = currentVariableObject;
- return true;
+ ConcurrentJITLocker locker(symbolTable().m_lock);
+ SymbolTableEntry entry = symbolTable().get(locker, property.impl());
+
+ if (!entry.isNull()) {
+ if (existingVariableMode == IgnoreExisting)
+ return;
+
+ // Do some checks to ensure that the variable we're being asked to create is sufficiently
+ // compatible with the one we have already created.
+
+ VarOffset offset = entry.varOffset();
+
+ // We can't change our minds about whether it's captured.
+ if (offset.kind() != varKind || constantMode != entry.constantMode()) {
+ dataLog(
+ "Trying to add variable called ", property, " as ", varKind, "/", constantMode,
+ " but it was already added as ", offset, "/", entry.constantMode(), ".\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
- if (currentVariableObject->isDynamicScope())
- break;
- }
- // Can't locate the property but we're able to avoid a few lookups.
- stackDepth = depth;
- index = missingSymbolMarker();
- JSObject* scope = *iter;
- if (++iter == end)
- globalObject = scope;
- return true;
+ return;
+ }
+
+ VarOffset varOffset;
+ if (varKind == VarKind::Scope)
+ varOffset = VarOffset(symbolTable().takeNextScopeOffset(locker));
+ else {
+ ASSERT(varKind == VarKind::Stack);
+ varOffset = VarOffset(virtualRegisterForLocal(m_calleeRegisters.size()));
+ }
+ SymbolTableEntry newEntry(varOffset, constantMode == IsConstant ? ReadOnly : 0);
+ symbolTable().add(locker, property.impl(), newEntry);
+
+ if (varKind == VarKind::Stack) {
+ RegisterID* local = addVar();
+ RELEASE_ASSERT(local->index() == varOffset.stackOffset().offset());
+ }
}
-RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype)
-{
- emitOpcode(op_instanceof);
+void BytecodeGenerator::emitCheckHasInstance(RegisterID* dst, RegisterID* value, RegisterID* base, Label* target)
+{
+ size_t begin = instructions().size();
+ emitOpcode(op_check_has_instance);
instructions().append(dst->index());
instructions().append(value->index());
instructions().append(base->index());
- instructions().append(basePrototype->index());
- return dst;
+ instructions().append(target->bind(begin, instructions().size()));
}
-RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const Identifier& property)
+// Indicates the least upper bound of resolve type based on local scope. The bytecode linker
+// will start with this ResolveType and compute the least upper bound including intercepting scopes.
+ResolveType BytecodeGenerator::resolveType()
{
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- if (!findScopedProperty(property, index, depth, false, globalObject) && !globalObject) {
- // We can't optimise at all :-(
- emitOpcode(op_resolve);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- return dst;
- }
-
- if (globalObject) {
- bool forceGlobalResolve = false;
- if (m_regeneratingForExceptionInfo) {
-#if ENABLE(JIT)
- forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInfoAtBytecodeOffset(instructions().size());
-#else
- forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInstructionAtBytecodeOffset(instructions().size());
-#endif
- }
-
- if (index != missingSymbolMarker() && !forceGlobalResolve) {
- // Directly index the property lookup across multiple scopes.
- return emitGetScopedVar(dst, depth, index, globalObject);
- }
+ if (m_localScopeDepth)
+ return Dynamic;
+ if (m_symbolTable && m_symbolTable->usesNonStrictEval())
+ return GlobalPropertyWithVarInjectionChecks;
+ return GlobalProperty;
+}
-#if ENABLE(JIT)
- m_codeBlock->addGlobalResolveInfo(instructions().size());
-#else
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
-#endif
- emitOpcode(op_resolve_global);
- instructions().append(dst->index());
- instructions().append(globalObject);
- instructions().append(addConstant(property));
+RegisterID* BytecodeGenerator::emitResolveScope(RegisterID* dst, const Variable& variable)
+{
+ switch (variable.offset().kind()) {
+ case VarKind::Stack:
+ return nullptr;
+
+ case VarKind::DirectArgument:
+ return argumentsRegister();
+
+ case VarKind::Scope:
+ // This always refers to the activation that *we* allocated, and not the current scope that code
+ // lives in. Note that this will change once we have proper support for block scoping. Once that
+ // changes, it will be correct for this code to return scopeRegister(). The only reason why we
+ // don't do that already is that m_lexicalEnvironment is required by ConstDeclNode. ConstDeclNode
+ // requires weird things because it is a shameful pile of nonsense, but block scoping would make
+ // that code sensible and obviate the need for us to do bad things.
+ return m_lexicalEnvironmentRegister;
+
+ case VarKind::Invalid:
+ // Indicates non-local resolution.
+
+ ASSERT(!m_symbolTable || !m_symbolTable->contains(variable.ident().impl()) || resolveType() == Dynamic);
+
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+
+ // resolve_scope dst, id, ResolveType, depth
+ emitOpcode(op_resolve_scope);
+ dst = tempDestination(dst);
+ instructions().append(kill(dst));
+ instructions().append(scopeRegister()->index());
+ instructions().append(addConstant(variable.ident()));
+ instructions().append(resolveType());
instructions().append(0);
instructions().append(0);
return dst;
}
-
- if (index != missingSymbolMarker()) {
- // Directly index the property lookup across multiple scopes.
- return emitGetScopedVar(dst, depth, index, globalObject);
- }
-
- // In this case we are at least able to drop a few scope chains from the
- // lookup chain, although we still need to hash from then on.
- emitOpcode(op_resolve_skip);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(depth);
- return dst;
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
}
-RegisterID* BytecodeGenerator::emitGetScopedVar(RegisterID* dst, size_t depth, int index, JSValuePtr globalObject)
+RegisterID* BytecodeGenerator::emitGetFromScope(RegisterID* dst, RegisterID* scope, const Variable& variable, ResolveMode resolveMode)
{
- if (globalObject) {
- emitOpcode(op_get_global_var);
- instructions().append(dst->index());
- instructions().append(asCell(globalObject));
- instructions().append(index);
+ switch (variable.offset().kind()) {
+ case VarKind::Stack:
+ return emitMove(dst, variable.local());
+
+ case VarKind::DirectArgument: {
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_arguments);
+ instructions().append(kill(dst));
+ instructions().append(scope->index());
+ instructions().append(variable.offset().capturedArgumentsOffset().offset());
+ instructions().append(profile);
return dst;
}
-
- emitOpcode(op_get_scoped_var);
- instructions().append(dst->index());
- instructions().append(index);
- instructions().append(depth);
- return dst;
+
+ case VarKind::Scope:
+ case VarKind::Invalid: {
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+
+ // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_scope);
+ instructions().append(kill(dst));
+ instructions().append(scope->index());
+ instructions().append(addConstant(variable.ident()));
+ instructions().append(ResolveModeAndType(resolveMode, variable.offset().isScope() ? LocalClosureVar : resolveType()).operand());
+ instructions().append(0);
+ instructions().append(variable.offset().isScope() ? variable.offset().scopeOffset().offset() : 0);
+ instructions().append(profile);
+ return dst;
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
}
-RegisterID* BytecodeGenerator::emitPutScopedVar(size_t depth, int index, RegisterID* value, JSValuePtr globalObject)
+RegisterID* BytecodeGenerator::emitPutToScope(RegisterID* scope, const Variable& variable, RegisterID* value, ResolveMode resolveMode)
{
- if (globalObject) {
- emitOpcode(op_put_global_var);
- instructions().append(asCell(globalObject));
- instructions().append(index);
+ switch (variable.offset().kind()) {
+ case VarKind::Stack:
+ emitMove(variable.local(), value);
+ return value;
+
+ case VarKind::DirectArgument:
+ emitOpcode(op_put_to_arguments);
+ instructions().append(scope->index());
+ instructions().append(variable.offset().capturedArgumentsOffset().offset());
instructions().append(value->index());
return value;
- }
- emitOpcode(op_put_scoped_var);
- instructions().append(index);
- instructions().append(depth);
- instructions().append(value->index());
- return value;
+
+ case VarKind::Scope:
+ case VarKind::Invalid: {
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+
+ // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+ emitOpcode(op_put_to_scope);
+ instructions().append(scope->index());
+ instructions().append(addConstant(variable.ident()));
+ instructions().append(value->index());
+ ScopeOffset offset;
+ if (variable.offset().isScope()) {
+ offset = variable.offset().scopeOffset();
+ instructions().append(ResolveModeAndType(resolveMode, LocalClosureVar).operand());
+ } else {
+ ASSERT(resolveType() != LocalClosureVar);
+ instructions().append(ResolveModeAndType(resolveMode, resolveType()).operand());
+ }
+ instructions().append(0);
+ instructions().append(!!offset ? offset.offset() : 0);
+ return value;
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
}
-RegisterID* BytecodeGenerator::emitResolveBase(RegisterID* dst, const Identifier& property)
+RegisterID* BytecodeGenerator::initializeVariable(const Variable& variable, RegisterID* value)
{
- emitOpcode(op_resolve_base);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- return dst;
+ RegisterID* scope;
+ switch (variable.offset().kind()) {
+ case VarKind::Stack:
+ scope = nullptr;
+ break;
+
+ case VarKind::DirectArgument:
+ scope = argumentsRegister();
+ break;
+
+ case VarKind::Scope:
+ scope = scopeRegister();
+ break;
+
+ default:
+ scope = nullptr;
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ return emitPutToScope(scope, variable, value, ThrowIfNotFound);
}
-RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
+RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype)
{
- emitOpcode(op_resolve_with_base);
- instructions().append(baseDst->index());
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- return baseDst;
+ emitOpcode(op_instanceof);
+ instructions().append(dst->index());
+ instructions().append(value->index());
+ instructions().append(basePrototype->index());
+ return dst;
}
-RegisterID* BytecodeGenerator::emitResolveFunction(RegisterID* baseDst, RegisterID* funcDst, const Identifier& property)
+RegisterID* BytecodeGenerator::emitInitGlobalConst(const Identifier& identifier, RegisterID* value)
{
- emitOpcode(op_resolve_func);
- instructions().append(baseDst->index());
- instructions().append(funcDst->index());
- instructions().append(addConstant(property));
- return baseDst;
+ ASSERT(m_codeType == GlobalCode);
+ emitOpcode(op_init_global_const_nop);
+ instructions().append(0);
+ instructions().append(value->index());
+ instructions().append(0);
+ instructions().append(addConstant(identifier));
+ return value;
}
RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
{
-#if ENABLE(JIT)
- m_codeBlock->addStructureStubInfo(StructureStubInfo(op_get_by_id));
-#else
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
- emitOpcode(op_get_by_id);
- instructions().append(dst->index());
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id);
+ instructions().append(kill(dst));
instructions().append(base->index());
instructions().append(addConstant(property));
instructions().append(0);
instructions().append(0);
instructions().append(0);
instructions().append(0);
+ instructions().append(profile);
return dst;
}
RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
-#if ENABLE(JIT)
- m_codeBlock->addStructureStubInfo(StructureStubInfo(op_put_by_id));
-#else
+ unsigned propertyIndex = addConstant(property);
+
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
emitOpcode(op_put_by_id);
instructions().append(base->index());
- instructions().append(addConstant(property));
+ instructions().append(propertyIndex);
instructions().append(value->index());
instructions().append(0);
instructions().append(0);
instructions().append(0);
instructions().append(0);
+ instructions().append(0);
+
return value;
}
-RegisterID* BytecodeGenerator::emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value)
+RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value, PropertyNode::PutType putType)
{
- emitOpcode(op_put_getter);
+ ASSERT(!parseIndex(property));
+ unsigned propertyIndex = addConstant(property);
+
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+
+ emitOpcode(op_put_by_id);
instructions().append(base->index());
- instructions().append(addConstant(property));
+ instructions().append(propertyIndex);
instructions().append(value->index());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(putType == PropertyNode::KnownDirect || property != m_vm->propertyNames->underscoreProto);
return value;
}
-RegisterID* BytecodeGenerator::emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value)
+void BytecodeGenerator::emitPutGetterById(RegisterID* base, const Identifier& property, RegisterID* getter)
{
- emitOpcode(op_put_setter);
+ unsigned propertyIndex = addConstant(property);
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
+ emitOpcode(op_put_getter_by_id);
instructions().append(base->index());
- instructions().append(addConstant(property));
- instructions().append(value->index());
- return value;
+ instructions().append(propertyIndex);
+ instructions().append(getter->index());
+}
+
+void BytecodeGenerator::emitPutSetterById(RegisterID* base, const Identifier& property, RegisterID* setter)
+{
+ unsigned propertyIndex = addConstant(property);
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
+ emitOpcode(op_put_setter_by_id);
+ instructions().append(base->index());
+ instructions().append(propertyIndex);
+ instructions().append(setter->index());
+}
+
+void BytecodeGenerator::emitPutGetterSetter(RegisterID* base, const Identifier& property, RegisterID* getter, RegisterID* setter)
+{
+ unsigned propertyIndex = addConstant(property);
+
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
+ emitOpcode(op_put_getter_setter);
+ instructions().append(base->index());
+ instructions().append(propertyIndex);
+ instructions().append(getter->index());
+ instructions().append(setter->index());
}
RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier& property)
RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
{
- emitOpcode(op_get_by_val);
- instructions().append(dst->index());
+ for (size_t i = m_forInContextStack.size(); i > 0; i--) {
+ ForInContext* context = m_forInContextStack[i - 1].get();
+ if (context->local() != property)
+ continue;
+
+ if (!context->isValid())
+ break;
+
+ if (context->type() == ForInContext::IndexedForInContextType) {
+ property = static_cast<IndexedForInContext*>(context)->index();
+ break;
+ }
+
+ ASSERT(context->type() == ForInContext::StructureForInContextType);
+ StructureForInContext* structureContext = static_cast<StructureForInContext*>(context);
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_direct_pname);
+ instructions().append(kill(dst));
+ instructions().append(base->index());
+ instructions().append(property->index());
+ instructions().append(structureContext->index()->index());
+ instructions().append(structureContext->enumerator()->index());
+ instructions().append(profile);
+ return dst;
+ }
+
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val);
+ instructions().append(kill(dst));
instructions().append(base->index());
instructions().append(property->index());
+ instructions().append(arrayProfile);
+ instructions().append(profile);
return dst;
}
RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
{
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
emitOpcode(op_put_by_val);
instructions().append(base->index());
instructions().append(property->index());
instructions().append(value->index());
+ instructions().append(arrayProfile);
+
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitDirectPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
+{
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ emitOpcode(op_put_by_val_direct);
+ instructions().append(base->index());
+ instructions().append(property->index());
+ instructions().append(value->index());
+ instructions().append(arrayProfile);
return value;
}
return value;
}
+RegisterID* BytecodeGenerator::emitCreateThis(RegisterID* dst)
+{
+ size_t begin = instructions().size();
+ m_staticPropertyAnalyzer.createThis(m_thisRegister.index(), begin + 3);
+
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+ emitOpcode(op_create_this);
+ instructions().append(m_thisRegister.index());
+ instructions().append(m_thisRegister.index());
+ instructions().append(0);
+ instructions().append(0);
+ return dst;
+}
+
+void BytecodeGenerator::emitTDZCheck(RegisterID* target)
+{
+ emitOpcode(op_check_tdz);
+ instructions().append(target->index());
+}
+
RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst)
{
+ size_t begin = instructions().size();
+ m_staticPropertyAnalyzer.newObject(dst->index(), begin + 2);
+
emitOpcode(op_new_object);
instructions().append(dst->index());
+ instructions().append(0);
+ instructions().append(newObjectAllocationProfile());
return dst;
}
-RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elements)
+unsigned BytecodeGenerator::addConstantBuffer(unsigned length)
+{
+ return m_codeBlock->addConstantBuffer(length);
+}
+
+JSString* BytecodeGenerator::addStringConstant(const Identifier& identifier)
+{
+ JSString*& stringInMap = m_stringMap.add(identifier.impl(), nullptr).iterator->value;
+ if (!stringInMap) {
+ stringInMap = jsString(vm(), identifier.string());
+ addConstantValue(stringInMap);
+ }
+ return stringInMap;
+}
+
+JSTemplateRegistryKey* BytecodeGenerator::addTemplateRegistryKeyConstant(const TemplateRegistryKey& templateRegistryKey)
+{
+ JSTemplateRegistryKey*& templateRegistryKeyInMap = m_templateRegistryKeyMap.add(templateRegistryKey, nullptr).iterator->value;
+ if (!templateRegistryKeyInMap) {
+ templateRegistryKeyInMap = JSTemplateRegistryKey::create(*vm(), templateRegistryKey);
+ addConstantValue(templateRegistryKeyInMap);
+ }
+ return templateRegistryKeyInMap;
+}
+
+RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elements, unsigned length)
{
- Vector<RefPtr<RegisterID>, 16> argv;
+#if !ASSERT_DISABLED
+ unsigned checkLength = 0;
+#endif
+ bool hadVariableExpression = false;
+ if (length) {
+ for (ElementNode* n = elements; n; n = n->next()) {
+ if (!n->value()->isConstant()) {
+ hadVariableExpression = true;
+ break;
+ }
+ if (n->elision())
+ break;
+#if !ASSERT_DISABLED
+ checkLength++;
+#endif
+ }
+ if (!hadVariableExpression) {
+ ASSERT(length == checkLength);
+ unsigned constantBufferIndex = addConstantBuffer(length);
+ JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex).data();
+ unsigned index = 0;
+ for (ElementNode* n = elements; index < length; n = n->next()) {
+ ASSERT(n->value()->isConstant());
+ constantBuffer[index++] = static_cast<ConstantNode*>(n->value())->jsValue(*this);
+ }
+ emitOpcode(op_new_array_buffer);
+ instructions().append(dst->index());
+ instructions().append(constantBufferIndex);
+ instructions().append(length);
+ instructions().append(newArrayAllocationProfile());
+ return dst;
+ }
+ }
+
+ Vector<RefPtr<RegisterID>, 16, UnsafeVectorOverflow> argv;
for (ElementNode* n = elements; n; n = n->next()) {
- if (n->elision())
+ if (!length)
break;
+ length--;
+ ASSERT(!n->value()->isSpreadExpression());
argv.append(newTemporary());
// op_new_array requires the initial values to be a sequential range of registers
- ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
+ ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() - 1);
emitNode(argv.last().get(), n->value());
}
+ ASSERT(!length);
emitOpcode(op_new_array);
instructions().append(dst->index());
instructions().append(argv.size() ? argv[0]->index() : 0); // argv
instructions().append(argv.size()); // argc
+ instructions().append(newArrayAllocationProfile());
return dst;
}
-RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FuncDeclNode* n)
+RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FunctionBodyNode* function)
+{
+ return emitNewFunctionInternal(dst, m_codeBlock->addFunctionDecl(makeFunction(function)));
+}
+
+RegisterID* BytecodeGenerator::emitNewFunctionInternal(RegisterID* dst, unsigned index)
{
emitOpcode(op_new_func);
instructions().append(dst->index());
- instructions().append(addConstant(n));
+ instructions().append(scopeRegister()->index());
+ instructions().append(index);
return dst;
}
return dst;
}
-
RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* r0, FuncExprNode* n)
{
+ FunctionBodyNode* function = n->body();
+ unsigned index = m_codeBlock->addFunctionExpr(makeFunction(function));
+
emitOpcode(op_new_func_exp);
instructions().append(r0->index());
- instructions().append(addConstant(n));
+ instructions().append(scopeRegister()->index());
+ instructions().append(index);
return r0;
}
-RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitNewDefaultConstructor(RegisterID* dst, ConstructorKind constructorKind, const Identifier& name)
+{
+ UnlinkedFunctionExecutable* executable = m_vm->builtinExecutables()->createDefaultConstructor(constructorKind, name);
+
+ unsigned index = m_codeBlock->addFunctionExpr(executable);
+
+ emitOpcode(op_new_func_exp);
+ instructions().append(dst->index());
+ instructions().append(scopeRegister()->index());
+ instructions().append(index);
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
- return emitCall(op_call, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
+ return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd);
}
-RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
- return emitCall(op_call_eval, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
+ return emitCall(op_call_eval, dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd);
}
-RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+ExpectedFunction BytecodeGenerator::expectedFunctionForIdentifier(const Identifier& identifier)
{
- ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
- ASSERT(func->refCount());
- ASSERT(thisRegister->refCount());
+ if (identifier == m_vm->propertyNames->Object || identifier == m_vm->propertyNames->ObjectPrivateName)
+ return ExpectObjectConstructor;
+ if (identifier == m_vm->propertyNames->Array || identifier == m_vm->propertyNames->ArrayPrivateName)
+ return ExpectArrayConstructor;
+ return NoExpectedFunction;
+}
- RegisterID* originalFunc = func;
- if (m_shouldEmitProfileHooks) {
- // If codegen decided to recycle func as this call's destination register,
- // we need to undo that optimization here so that func will still be around
- // for the sake of op_profile_did_call.
- if (dst == func) {
- RefPtr<RegisterID> movedThisRegister = emitMove(newTemporary(), thisRegister);
- RefPtr<RegisterID> movedFunc = emitMove(thisRegister, func);
-
- thisRegister = movedThisRegister.release().releaseRef();
- func = movedFunc.release().releaseRef();
+ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, Label* done)
+{
+ RefPtr<Label> realCall = newLabel();
+ switch (expectedFunction) {
+ case ExpectObjectConstructor: {
+ // If the number of arguments is non-zero, then we can't do anything interesting.
+ if (callArguments.argumentCountIncludingThis() >= 2)
+ return NoExpectedFunction;
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jneq_ptr);
+ instructions().append(func->index());
+ instructions().append(Special::ObjectConstructor);
+ instructions().append(realCall->bind(begin, instructions().size()));
+
+ if (dst != ignoredResult())
+ emitNewObject(dst);
+ break;
+ }
+
+ case ExpectArrayConstructor: {
+ // If you're doing anything other than "new Array()" or "new Array(foo)" then we
+ // don't inline it, for now. The only reason is that call arguments are in
+ // the opposite order of what op_new_array expects, so we'd either need to change
+ // how op_new_array works or we'd need an op_new_array_reverse. Neither of these
+ // things sounds like it's worth it.
+ if (callArguments.argumentCountIncludingThis() > 2)
+ return NoExpectedFunction;
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jneq_ptr);
+ instructions().append(func->index());
+ instructions().append(Special::ArrayConstructor);
+ instructions().append(realCall->bind(begin, instructions().size()));
+
+ if (dst != ignoredResult()) {
+ if (callArguments.argumentCountIncludingThis() == 2) {
+ emitOpcode(op_new_array_with_size);
+ instructions().append(dst->index());
+ instructions().append(callArguments.argumentRegister(0)->index());
+ instructions().append(newArrayAllocationProfile());
+ } else {
+ ASSERT(callArguments.argumentCountIncludingThis() == 1);
+ emitOpcode(op_new_array);
+ instructions().append(dst->index());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(newArrayAllocationProfile());
+ }
}
+ break;
}
+
+ default:
+ ASSERT(expectedFunction == NoExpectedFunction);
+ return NoExpectedFunction;
+ }
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jmp);
+ instructions().append(done->bind(begin, instructions().size()));
+ emitLabel(realCall.get());
+
+ return expectedFunction;
+}
+
+RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+ ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
+ ASSERT(func->refCount());
+
+ if (m_shouldEmitProfileHooks)
+ emitMove(callArguments.profileHookRegister(), func);
// Generate code for arguments.
- Vector<RefPtr<RegisterID>, 16> argv;
- argv.append(thisRegister);
- for (ArgumentListNode* n = argumentsNode->m_listNode.get(); n; n = n->m_next.get()) {
- argv.append(newTemporary());
- // op_call requires the arguments to be a sequential range of registers
- ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
- emitNode(argv.last().get(), n);
+ unsigned argument = 0;
+ if (callArguments.argumentsNode()) {
+ ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
+ if (n && n->m_expr->isSpreadExpression()) {
+ RELEASE_ASSERT(!n->m_next);
+ auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
+ RefPtr<RegisterID> argumentRegister;
+ argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
+ RefPtr<RegisterID> thisRegister = emitMove(newTemporary(), callArguments.thisRegister());
+ return emitCallVarargs(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+ }
+ for (; n; n = n->m_next)
+ emitNode(callArguments.argumentRegister(argument++), n);
}
-
+
// Reserve space for call frame.
- Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
- for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+ Vector<RefPtr<RegisterID>, JSStack::CallFrameHeaderSize, UnsafeVectorOverflow> callFrame;
+ for (int i = 0; i < JSStack::CallFrameHeaderSize; ++i)
callFrame.append(newTemporary());
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_will_call);
- instructions().append(func->index());
+ instructions().append(callArguments.profileHookRegister()->index());
+ }
-#if ENABLE(JIT)
- m_codeBlock->addFunctionRegisterInfo(instructions().size(), func->index());
-#endif
+ emitExpressionInfo(divot, divotStart, divotEnd);
+
+ RefPtr<Label> done = newLabel();
+ expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get());
+
+ // Emit call.
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID);
+ ASSERT(dst);
+ ASSERT(dst != ignoredResult());
+ instructions().append(dst->index());
+ instructions().append(func->index());
+ instructions().append(callArguments.argumentCountIncludingThis());
+ instructions().append(callArguments.stackOffset());
+ instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+ instructions().append(0);
+ instructions().append(arrayProfile);
+ instructions().append(profile);
+
+ if (expectedFunction != NoExpectedFunction)
+ emitLabel(done.get());
+
+ if (m_shouldEmitProfileHooks) {
+ emitOpcode(op_profile_did_call);
+ instructions().append(callArguments.profileHookRegister()->index());
}
- emitExpressionInfo(divot, startOffset, endOffset);
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+ return emitCallVarargs(op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
+}
+
+RegisterID* BytecodeGenerator::emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+ return emitCallVarargs(op_construct_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
+}
+
+RegisterID* BytecodeGenerator::emitCallVarargs(OpcodeID opcode, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+ if (m_shouldEmitProfileHooks) {
+ emitMove(profileHookRegister, func);
+ emitOpcode(op_profile_will_call);
+ instructions().append(profileHookRegister->index());
+ }
+
+ emitExpressionInfo(divot, divotStart, divotEnd);
+
+ // Emit call.
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(opcode);
+ ASSERT(dst != ignoredResult());
+ instructions().append(dst->index());
+ instructions().append(func->index());
+ instructions().append(thisRegister ? thisRegister->index() : 0);
+ instructions().append(arguments->index());
+ instructions().append(firstFreeRegister->index());
+ instructions().append(firstVarArgOffset);
+ instructions().append(arrayProfile);
+ instructions().append(profile);
+ if (m_shouldEmitProfileHooks) {
+ emitOpcode(op_profile_did_call);
+ instructions().append(profileHookRegister->index());
+ }
+ return dst;
+}
+
+void BytecodeGenerator::emitCallDefineProperty(RegisterID* newObj, RegisterID* propertyNameRegister,
+ RegisterID* valueRegister, RegisterID* getterRegister, RegisterID* setterRegister, unsigned options, const JSTextPosition& position)
+{
+ RefPtr<RegisterID> descriptorRegister = emitNewObject(newTemporary());
-#if ENABLE(JIT)
- m_codeBlock->addCallLinkInfo();
-#endif
+ RefPtr<RegisterID> trueRegister = emitLoad(newTemporary(), true);
+ if (options & PropertyConfigurable)
+ emitDirectPutById(descriptorRegister.get(), propertyNames().configurable, trueRegister.get(), PropertyNode::Unknown);
+ if (options & PropertyWritable)
+ emitDirectPutById(descriptorRegister.get(), propertyNames().writable, trueRegister.get(), PropertyNode::Unknown);
+ else if (valueRegister) {
+ RefPtr<RegisterID> falseRegister = emitLoad(newTemporary(), false);
+ emitDirectPutById(descriptorRegister.get(), propertyNames().writable, falseRegister.get(), PropertyNode::Unknown);
+ }
+ if (options & PropertyEnumerable)
+ emitDirectPutById(descriptorRegister.get(), propertyNames().enumerable, trueRegister.get(), PropertyNode::Unknown);
- // Emit call.
- emitOpcode(opcodeID);
- instructions().append(dst->index()); // dst
- instructions().append(func->index()); // func
- instructions().append(argv.size()); // argCount
- instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
+ if (valueRegister)
+ emitDirectPutById(descriptorRegister.get(), propertyNames().value, valueRegister, PropertyNode::Unknown);
+ if (getterRegister)
+ emitDirectPutById(descriptorRegister.get(), propertyNames().get, getterRegister, PropertyNode::Unknown);
+ if (setterRegister)
+ emitDirectPutById(descriptorRegister.get(), propertyNames().set, setterRegister, PropertyNode::Unknown);
- if (m_shouldEmitProfileHooks) {
- emitOpcode(op_profile_did_call);
- instructions().append(func->index());
+ RefPtr<RegisterID> definePropertyRegister = emitMoveLinkTimeConstant(newTemporary(), LinkTimeConstant::DefinePropertyFunction);
- if (dst == originalFunc) {
- thisRegister->deref();
- func->deref();
- }
- }
+ CallArguments callArguments(*this, nullptr, 3);
+ emitLoad(callArguments.thisRegister(), jsUndefined());
+ emitMove(callArguments.argumentRegister(0), newObj);
+ emitMove(callArguments.argumentRegister(1), propertyNameRegister);
+ emitMove(callArguments.argumentRegister(2), descriptorRegister.get());
- return dst;
+ emitCall(newTemporary(), definePropertyRegister.get(), NoExpectedFunction, callArguments, position, position, position);
}
RegisterID* BytecodeGenerator::emitReturn(RegisterID* src)
{
- if (m_codeBlock->needsFullScopeChain()) {
- emitOpcode(op_tear_off_activation);
- instructions().append(m_activationRegisterIndex);
- } else if (m_codeBlock->usesArguments() && m_codeBlock->m_numParameters > 1)
- emitOpcode(op_tear_off_arguments);
+ if (isConstructor()) {
+ bool derived = constructorKind() == ConstructorKind::Derived;
+ if (derived && src->index() == m_thisRegister.index())
+ emitTDZCheck(src);
+
+ RefPtr<Label> isObjectLabel = newLabel();
+ emitJumpIfTrue(emitIsObject(newTemporary(), src), isObjectLabel.get());
+
+ if (derived) {
+ RefPtr<Label> isUndefinedLabel = newLabel();
+ emitJumpIfTrue(emitIsUndefined(newTemporary(), src), isUndefinedLabel.get());
+ emitThrowTypeError("Cannot return a non-object type in the constructor of a derived class.");
+ emitLabel(isUndefinedLabel.get());
+ if (constructorKind() == ConstructorKind::Derived)
+ emitTDZCheck(&m_thisRegister);
+ }
+
+ emitUnaryNoDstOp(op_ret, &m_thisRegister);
+
+ emitLabel(isObjectLabel.get());
+ }
return emitUnaryNoDstOp(op_ret, src);
}
return src;
}
-RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
ASSERT(func->refCount());
- RegisterID* originalFunc = func;
- if (m_shouldEmitProfileHooks) {
- // If codegen decided to recycle func as this call's destination register,
- // we need to undo that optimization here so that func will still be around
- // for the sake of op_profile_did_call.
- if (dst == func) {
- RefPtr<RegisterID> movedFunc = emitMove(newTemporary(), func);
- func = movedFunc.release().releaseRef();
- }
- }
-
- RefPtr<RegisterID> funcProto = newTemporary();
+ if (m_shouldEmitProfileHooks)
+ emitMove(callArguments.profileHookRegister(), func);
// Generate code for arguments.
- Vector<RefPtr<RegisterID>, 16> argv;
- argv.append(newTemporary()); // reserve space for "this"
- for (ArgumentListNode* n = argumentsNode ? argumentsNode->m_listNode.get() : 0; n; n = n->m_next.get()) {
- argv.append(newTemporary());
- // op_construct requires the arguments to be a sequential range of registers
- ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
- emitNode(argv.last().get(), n);
+ unsigned argument = 0;
+ if (ArgumentsNode* argumentsNode = callArguments.argumentsNode()) {
+
+ ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
+ if (n && n->m_expr->isSpreadExpression()) {
+ RELEASE_ASSERT(!n->m_next);
+ auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
+ RefPtr<RegisterID> argumentRegister;
+ argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
+ return emitConstructVarargs(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+ }
+
+ for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next)
+ emitNode(callArguments.argumentRegister(argument++), n);
}
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_will_call);
- instructions().append(func->index());
+ instructions().append(callArguments.profileHookRegister()->index());
}
- // Load prototype.
- emitExpressionInfo(divot, startOffset, endOffset);
- emitGetByIdExceptionInfo(op_construct);
- emitGetById(funcProto.get(), func, globalData()->propertyNames->prototype);
-
// Reserve space for call frame.
- Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
- for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+ Vector<RefPtr<RegisterID>, JSStack::CallFrameHeaderSize, UnsafeVectorOverflow> callFrame;
+ for (int i = 0; i < JSStack::CallFrameHeaderSize; ++i)
callFrame.append(newTemporary());
- emitExpressionInfo(divot, startOffset, endOffset);
-
-#if ENABLE(JIT)
- m_codeBlock->addCallLinkInfo();
-#endif
-
- emitOpcode(op_construct);
- instructions().append(dst->index()); // dst
- instructions().append(func->index()); // func
- instructions().append(argv.size()); // argCount
- instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
- instructions().append(funcProto->index()); // proto
- instructions().append(argv[0]->index()); // thisRegister
+ emitExpressionInfo(divot, divotStart, divotEnd);
+
+ RefPtr<Label> done = newLabel();
+ expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get());
- emitOpcode(op_construct_verify);
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_construct);
+ ASSERT(dst != ignoredResult());
instructions().append(dst->index());
- instructions().append(argv[0]->index());
+ instructions().append(func->index());
+ instructions().append(callArguments.argumentCountIncludingThis());
+ instructions().append(callArguments.stackOffset());
+ instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(profile);
+
+ if (expectedFunction != NoExpectedFunction)
+ emitLabel(done.get());
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
- instructions().append(func->index());
-
- if (dst == originalFunc)
- func->deref();
+ instructions().append(callArguments.profileHookRegister()->index());
}
return dst;
}
-RegisterID* BytecodeGenerator::emitPushScope(RegisterID* scope)
+RegisterID* BytecodeGenerator::emitStrcat(RegisterID* dst, RegisterID* src, int count)
+{
+ emitOpcode(op_strcat);
+ instructions().append(dst->index());
+ instructions().append(src->index());
+ instructions().append(count);
+
+ return dst;
+}
+
+void BytecodeGenerator::emitToPrimitive(RegisterID* dst, RegisterID* src)
+{
+ emitOpcode(op_to_primitive);
+ instructions().append(dst->index());
+ instructions().append(src->index());
+}
+
+void BytecodeGenerator::emitGetScope()
+{
+ emitOpcode(op_get_scope);
+ instructions().append(scopeRegister()->index());
+}
+
+RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* dst, RegisterID* scope)
{
- ASSERT(scope->isTemporary());
ControlFlowContext context;
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
- m_dynamicScopeDepth++;
+ m_localScopeDepth++;
- return emitUnaryNoDstOp(op_push_scope, scope);
+ return emitUnaryOp(op_push_with_scope, dst, scope);
}
-void BytecodeGenerator::emitPopScope()
+void BytecodeGenerator::emitPopScope(RegisterID* srcDst)
{
ASSERT(m_scopeContextStack.size());
ASSERT(!m_scopeContextStack.last().isFinallyBlock);
emitOpcode(op_pop_scope);
+ instructions().append(srcDst->index());
m_scopeContextStack.removeLast();
- m_dynamicScopeDepth--;
+ m_localScopeDepth--;
}
-void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, int firstLine, int lastLine)
+void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, unsigned line, unsigned charOffset, unsigned lineStart)
{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ if (debugHookID != DidReachBreakpoint)
+ return;
+#else
if (!m_shouldEmitDebugHooks)
return;
+#endif
+ JSTextPosition divot(line, charOffset, lineStart);
+ emitExpressionInfo(divot, divot, divot);
emitOpcode(op_debug);
instructions().append(debugHookID);
- instructions().append(firstLine);
- instructions().append(lastLine);
+ instructions().append(false);
+}
+
+void BytecodeGenerator::pushFinallyContext(StatementNode* finallyBlock)
+{
+ // Reclaim free label scopes.
+ while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+ m_labelScopes.removeLast();
+
+ ControlFlowContext scope;
+ scope.isFinallyBlock = true;
+ FinallyContext context = {
+ finallyBlock,
+ nullptr,
+ nullptr,
+ static_cast<unsigned>(m_scopeContextStack.size()),
+ static_cast<unsigned>(m_switchContextStack.size()),
+ static_cast<unsigned>(m_forInContextStack.size()),
+ static_cast<unsigned>(m_tryContextStack.size()),
+ static_cast<unsigned>(m_labelScopes.size()),
+ m_finallyDepth,
+ m_localScopeDepth
+ };
+ scope.finallyContext = context;
+ m_scopeContextStack.append(scope);
+ m_finallyDepth++;
}
-void BytecodeGenerator::pushFinallyContext(Label* target, RegisterID* retAddrDst)
+void BytecodeGenerator::pushIteratorCloseContext(RegisterID* iterator, ThrowableExpressionData* node)
{
+ // Reclaim free label scopes.
+ while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+ m_labelScopes.removeLast();
+
ControlFlowContext scope;
scope.isFinallyBlock = true;
- FinallyContext context = { target, retAddrDst };
+ FinallyContext context = {
+ nullptr,
+ iterator,
+ node,
+ static_cast<unsigned>(m_scopeContextStack.size()),
+ static_cast<unsigned>(m_switchContextStack.size()),
+ static_cast<unsigned>(m_forInContextStack.size()),
+ static_cast<unsigned>(m_tryContextStack.size()),
+ static_cast<unsigned>(m_labelScopes.size()),
+ m_finallyDepth,
+ m_localScopeDepth
+ };
scope.finallyContext = context;
m_scopeContextStack.append(scope);
m_finallyDepth++;
{
ASSERT(m_scopeContextStack.size());
ASSERT(m_scopeContextStack.last().isFinallyBlock);
+ ASSERT(m_scopeContextStack.last().finallyContext.finallyBlock);
+ ASSERT(!m_scopeContextStack.last().finallyContext.iterator);
+ ASSERT(!m_scopeContextStack.last().finallyContext.enumerationNode);
+ ASSERT(m_finallyDepth > 0);
+ m_scopeContextStack.removeLast();
+ m_finallyDepth--;
+}
+
+void BytecodeGenerator::popIteratorCloseContext()
+{
+ ASSERT(m_scopeContextStack.size());
+ ASSERT(m_scopeContextStack.last().isFinallyBlock);
+ ASSERT(!m_scopeContextStack.last().finallyContext.finallyBlock);
+ ASSERT(m_scopeContextStack.last().finallyContext.iterator);
+ ASSERT(m_scopeContextStack.last().finallyContext.enumerationNode);
ASSERT(m_finallyDepth > 0);
m_scopeContextStack.removeLast();
m_finallyDepth--;
}
-LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
+LabelScopePtr BytecodeGenerator::breakTarget(const Identifier& name)
{
// Reclaim free label scopes.
- while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+ //
+ // The condition was previously coded as 'm_labelScopes.size() && !m_labelScopes.last().refCount()',
+ // however sometimes this appears to lead to GCC going a little haywire and entering the loop with
+ // size 0, leading to segfaulty badness. We are yet to identify a valid cause within our code to
+ // cause the GCC codegen to misbehave in this fashion, and as such the following refactoring of the
+ // loop condition is a workaround.
+ while (m_labelScopes.size()) {
+ if (m_labelScopes.last().refCount())
+ break;
m_labelScopes.removeLast();
+ }
if (!m_labelScopes.size())
- return 0;
+ return LabelScopePtr::null();
// We special-case the following, which is a syntax error in Firefox:
// label:
LabelScope* scope = &m_labelScopes[i];
if (scope->type() != LabelScope::NamedLabel) {
ASSERT(scope->breakTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->name() && *scope->name() == name) {
ASSERT(scope->breakTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
-LabelScope* BytecodeGenerator::continueTarget(const Identifier& name)
+LabelScopePtr BytecodeGenerator::continueTarget(const Identifier& name)
{
// Reclaim free label scopes.
while (m_labelScopes.size() && !m_labelScopes.last().refCount())
m_labelScopes.removeLast();
if (!m_labelScopes.size())
- return 0;
+ return LabelScopePtr::null();
if (name.isEmpty()) {
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() == LabelScope::Loop) {
ASSERT(scope->continueTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
// Continue to the loop nested nearest to the label scope that matches
// 'name'.
- LabelScope* result = 0;
+ LabelScopePtr result = LabelScopePtr::null();
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() == LabelScope::Loop) {
ASSERT(scope->continueTarget());
- result = scope;
+ result = LabelScopePtr(m_labelScopes, i);
}
if (scope->name() && *scope->name() == name)
- return result; // may be 0
+ return result; // may be null.
}
- return 0;
+ return LabelScopePtr::null();
}
-PassRefPtr<Label> BytecodeGenerator::emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope)
+void BytecodeGenerator::allocateAndEmitScope()
+{
+ m_scopeRegister = addVar();
+ m_scopeRegister->ref();
+ m_codeBlock->setScopeRegister(scopeRegister()->virtualRegister());
+ emitGetScope();
+}
+
+void BytecodeGenerator::emitComplexPopScopes(RegisterID* scope, ControlFlowContext* topScope, ControlFlowContext* bottomScope)
{
while (topScope > bottomScope) {
// First we count the number of dynamic scopes we need to remove to get
if (nNormalScopes) {
// We need to remove a number of dynamic scopes to get to the next
// finally block
- emitOpcode(op_jmp_scopes);
- instructions().append(nNormalScopes);
-
- // If topScope == bottomScope then there isn't actually a finally block
- // left to emit, so make the jmp_scopes jump directly to the target label
- if (topScope == bottomScope) {
- instructions().append(target->offsetFrom(instructions().size()));
- return target;
+ while (nNormalScopes--) {
+ emitOpcode(op_pop_scope);
+ instructions().append(scope->index());
}
- // Otherwise we just use jmp_scopes to pop a group of scopes and go
- // to the next instruction
- RefPtr<Label> nextInsn = newLabel();
- instructions().append(nextInsn->offsetFrom(instructions().size()));
- emitLabel(nextInsn.get());
+ // If topScope == bottomScope then there isn't a finally block left to emit.
+ if (topScope == bottomScope)
+ return;
}
+
+ Vector<ControlFlowContext> savedScopeContextStack;
+ Vector<SwitchInfo> savedSwitchContextStack;
+ Vector<std::unique_ptr<ForInContext>> savedForInContextStack;
+ Vector<TryContext> poppedTryContexts;
+ LabelScopeStore savedLabelScopes;
+ while (topScope > bottomScope && topScope->isFinallyBlock) {
+ RefPtr<Label> beforeFinally = emitLabel(newLabel().get());
+
+ // Save the current state of the world while instating the state of the world
+ // for the finally block.
+ FinallyContext finallyContext = topScope->finallyContext;
+ bool flipScopes = finallyContext.scopeContextStackSize != m_scopeContextStack.size();
+ bool flipSwitches = finallyContext.switchContextStackSize != m_switchContextStack.size();
+ bool flipForIns = finallyContext.forInContextStackSize != m_forInContextStack.size();
+ bool flipTries = finallyContext.tryContextStackSize != m_tryContextStack.size();
+ bool flipLabelScopes = finallyContext.labelScopesSize != m_labelScopes.size();
+ int topScopeIndex = -1;
+ int bottomScopeIndex = -1;
+ if (flipScopes) {
+ topScopeIndex = topScope - m_scopeContextStack.begin();
+ bottomScopeIndex = bottomScope - m_scopeContextStack.begin();
+ savedScopeContextStack = m_scopeContextStack;
+ m_scopeContextStack.shrink(finallyContext.scopeContextStackSize);
+ }
+ if (flipSwitches) {
+ savedSwitchContextStack = m_switchContextStack;
+ m_switchContextStack.shrink(finallyContext.switchContextStackSize);
+ }
+ if (flipForIns) {
+ savedForInContextStack.swap(m_forInContextStack);
+ m_forInContextStack.shrink(finallyContext.forInContextStackSize);
+ }
+ if (flipTries) {
+ while (m_tryContextStack.size() != finallyContext.tryContextStackSize) {
+ ASSERT(m_tryContextStack.size() > finallyContext.tryContextStackSize);
+ TryContext context = m_tryContextStack.last();
+ m_tryContextStack.removeLast();
+ TryRange range;
+ range.start = context.start;
+ range.end = beforeFinally;
+ range.tryData = context.tryData;
+ m_tryRanges.append(range);
+ poppedTryContexts.append(context);
+ }
+ }
+ if (flipLabelScopes) {
+ savedLabelScopes = m_labelScopes;
+ while (m_labelScopes.size() > finallyContext.labelScopesSize)
+ m_labelScopes.removeLast();
+ }
+ int savedFinallyDepth = m_finallyDepth;
+ m_finallyDepth = finallyContext.finallyDepth;
+ int savedDynamicScopeDepth = m_localScopeDepth;
+ m_localScopeDepth = finallyContext.dynamicScopeDepth;
+
+ if (finallyContext.finallyBlock) {
+ // Emit the finally block.
+ emitNode(finallyContext.finallyBlock);
+ } else {
+ // Emit the IteratorClose block.
+ ASSERT(finallyContext.iterator);
+ emitIteratorClose(finallyContext.iterator, finallyContext.enumerationNode);
+ }
- // To get here there must be at least one finally block present
- do {
- ASSERT(topScope->isFinallyBlock);
- emitJumpSubroutine(topScope->finallyContext.retAddrDst, topScope->finallyContext.finallyAddr);
+ RefPtr<Label> afterFinally = emitLabel(newLabel().get());
+
+ // Restore the state of the world.
+ if (flipScopes) {
+ m_scopeContextStack = savedScopeContextStack;
+ topScope = &m_scopeContextStack[topScopeIndex]; // assert it's within bounds
+ bottomScope = m_scopeContextStack.begin() + bottomScopeIndex; // don't assert, since the index might be -1.
+ }
+ if (flipSwitches)
+ m_switchContextStack = savedSwitchContextStack;
+ if (flipForIns)
+ m_forInContextStack.swap(savedForInContextStack);
+ if (flipTries) {
+ ASSERT(m_tryContextStack.size() == finallyContext.tryContextStackSize);
+ for (unsigned i = poppedTryContexts.size(); i--;) {
+ TryContext context = poppedTryContexts[i];
+ context.start = afterFinally;
+ m_tryContextStack.append(context);
+ }
+ poppedTryContexts.clear();
+ }
+ if (flipLabelScopes)
+ m_labelScopes = savedLabelScopes;
+ m_finallyDepth = savedFinallyDepth;
+ m_localScopeDepth = savedDynamicScopeDepth;
+
--topScope;
- if (!topScope->isFinallyBlock)
- break;
- } while (topScope > bottomScope);
+ }
}
- return emitJump(target);
}
-PassRefPtr<Label> BytecodeGenerator::emitJumpScopes(Label* target, int targetScopeDepth)
+void BytecodeGenerator::emitPopScopes(RegisterID* scope, int targetScopeDepth)
{
ASSERT(scopeDepth() - targetScopeDepth >= 0);
- ASSERT(target->isForward());
size_t scopeDelta = scopeDepth() - targetScopeDepth;
ASSERT(scopeDelta <= m_scopeContextStack.size());
if (!scopeDelta)
- return emitJump(target);
+ return;
- if (m_finallyDepth)
- return emitComplexJumpScopes(target, &m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
+ if (!m_finallyDepth) {
+ while (scopeDelta--) {
+ emitOpcode(op_pop_scope);
+ instructions().append(scope->index());
+ }
+ return;
+ }
- emitOpcode(op_jmp_scopes);
- instructions().append(scopeDelta);
- instructions().append(target->offsetFrom(instructions().size()));
- return target;
+ emitComplexPopScopes(scope, &m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
}
-RegisterID* BytecodeGenerator::emitNextPropertyName(RegisterID* dst, RegisterID* iter, Label* target)
+TryData* BytecodeGenerator::pushTry(Label* start)
{
- emitOpcode(op_next_pname);
- instructions().append(dst->index());
- instructions().append(iter->index());
- instructions().append(target->offsetFrom(instructions().size()));
- return dst;
+ TryData tryData;
+ tryData.target = newLabel();
+ tryData.targetScopeDepth = UINT_MAX;
+ tryData.handlerType = HandlerType::Illegal;
+ m_tryData.append(tryData);
+ TryData* result = &m_tryData.last();
+
+ TryContext tryContext;
+ tryContext.start = start;
+ tryContext.tryData = result;
+
+ m_tryContextStack.append(tryContext);
+
+ return result;
}
-RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* start, Label* end)
+void BytecodeGenerator::popTryAndEmitCatch(TryData* tryData, RegisterID* exceptionRegister, RegisterID* thrownValueRegister, Label* end, HandlerType handlerType)
{
-#if ENABLE(JIT)
- HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, 0 };
-#else
- HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth };
-#endif
+ m_usesExceptions = true;
+
+ ASSERT_UNUSED(tryData, m_tryContextStack.last().tryData == tryData);
+
+ TryRange tryRange;
+ tryRange.start = m_tryContextStack.last().start;
+ tryRange.end = end;
+ tryRange.tryData = m_tryContextStack.last().tryData;
+ m_tryRanges.append(tryRange);
+ m_tryContextStack.removeLast();
+
+ emitLabel(tryRange.tryData->target.get());
+ tryRange.tryData->targetScopeDepth = m_localScopeDepth;
+ tryRange.tryData->handlerType = handlerType;
- m_codeBlock->addExceptionHandler(info);
emitOpcode(op_catch);
- instructions().append(targetRegister->index());
- return targetRegister;
+ instructions().append(exceptionRegister->index());
+ instructions().append(thrownValueRegister->index());
}
-RegisterID* BytecodeGenerator::emitNewError(RegisterID* dst, ErrorType type, JSValuePtr message)
+void BytecodeGenerator::emitThrowReferenceError(const String& message)
{
- emitOpcode(op_new_error);
- instructions().append(dst->index());
- instructions().append(static_cast<int>(type));
- instructions().append(addUnexpectedConstant(message));
- return dst;
+ emitOpcode(op_throw_static_error);
+ instructions().append(addConstantValue(addStringConstant(Identifier::fromString(m_vm, message)))->index());
+ instructions().append(true);
}
-PassRefPtr<Label> BytecodeGenerator::emitJumpSubroutine(RegisterID* retAddrDst, Label* finally)
+void BytecodeGenerator::emitThrowTypeError(const String& message)
{
- emitOpcode(op_jsr);
- instructions().append(retAddrDst->index());
- instructions().append(finally->offsetFrom(instructions().size()));
- return finally;
+ emitOpcode(op_throw_static_error);
+ instructions().append(addConstantValue(addStringConstant(Identifier::fromString(m_vm, message)))->index());
+ instructions().append(false);
}
-void BytecodeGenerator::emitSubroutineReturn(RegisterID* retAddrSrc)
+void BytecodeGenerator::emitPushFunctionNameScope(RegisterID* dst, const Identifier& property, RegisterID* value, unsigned attributes)
{
- emitOpcode(op_sret);
- instructions().append(retAddrSrc->index());
+ emitOpcode(op_push_name_scope);
+ instructions().append(dst->index());
+ instructions().append(value->index());
+ instructions().append(addConstantValue(SymbolTable::createNameScopeTable(*vm(), property, attributes))->index());
+ instructions().append(JSNameScope::FunctionNameScope);
}
-void BytecodeGenerator::emitPushNewScope(RegisterID* dst, Identifier& property, RegisterID* value)
+void BytecodeGenerator::emitPushCatchScope(RegisterID* dst, const Identifier& property, RegisterID* value, unsigned attributes)
{
ControlFlowContext context;
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
- m_dynamicScopeDepth++;
-
- emitOpcode(op_push_new_scope);
+ m_localScopeDepth++;
+
+ emitOpcode(op_push_name_scope);
instructions().append(dst->index());
- instructions().append(addConstant(property));
instructions().append(value->index());
+ instructions().append(addConstantValue(SymbolTable::createNameScopeTable(*vm(), property, attributes))->index());
+ instructions().append(JSNameScope::CatchScope);
}
void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::SwitchType type)
{
- SwitchInfo info = { instructions().size(), type };
+ SwitchInfo info = { static_cast<uint32_t>(instructions().size()), type };
switch (type) {
case SwitchInfo::SwitchImmediate:
emitOpcode(op_switch_imm);
emitOpcode(op_switch_string);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
instructions().append(0); // place holder for table index
ASSERT(node->isNumber());
double value = static_cast<NumberNode*>(node)->value();
int32_t key = static_cast<int32_t>(value);
- ASSERT(JSValuePtr::makeInt32Fast(key) && (JSValuePtr::makeInt32Fast(key).getInt32Fast() == value));
ASSERT(key == value);
ASSERT(key >= min);
ASSERT(key <= max);
return key - min;
}
-static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
-{
- jumpTable.min = min;
- jumpTable.branchOffsets.resize(max - min + 1);
- jumpTable.branchOffsets.fill(0);
- for (uint32_t i = 0; i < clauseCount; ++i) {
- // We're emitting this after the clause labels should have been fixed, so
- // the labels should not be "forward" references
- ASSERT(!labels[i]->isForward());
- jumpTable.add(keyForImmediateSwitch(nodes[i], min, max), labels[i]->offsetFrom(switchAddress));
- }
-}
-
static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t max)
{
UNUSED_PARAM(max);
ASSERT(node->isString());
- UString::Rep* clause = static_cast<StringNode*>(node)->value().ustring().rep();
- ASSERT(clause->size() == 1);
+ StringImpl* clause = static_cast<StringNode*>(node)->value().impl();
+ ASSERT(clause->length() == 1);
- int32_t key = clause->data()[0];
+ int32_t key = (*clause)[0];
ASSERT(key >= min);
ASSERT(key <= max);
return key - min;
}
-static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
+static void prepareJumpTableForSwitch(
+ UnlinkedSimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount,
+ RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max,
+ int32_t (*keyGetter)(ExpressionNode*, int32_t min, int32_t max))
{
jumpTable.min = min;
jumpTable.branchOffsets.resize(max - min + 1);
// We're emitting this after the clause labels should have been fixed, so
// the labels should not be "forward" references
ASSERT(!labels[i]->isForward());
- jumpTable.add(keyForCharacterSwitch(nodes[i], min, max), labels[i]->offsetFrom(switchAddress));
+ jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
}
}
-static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
+static void prepareJumpTableForStringSwitch(UnlinkedStringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
{
for (uint32_t i = 0; i < clauseCount; ++i) {
// We're emitting this after the clause labels should have been fixed, so
ASSERT(!labels[i]->isForward());
ASSERT(nodes[i]->isString());
- UString::Rep* clause = static_cast<StringNode*>(nodes[i])->value().ustring().rep();
- OffsetLocation location;
- location.branchOffset = labels[i]->offsetFrom(switchAddress);
-#if ENABLE(JIT)
- location.ctiOffset = 0;
-#endif
- jumpTable.offsetTable.add(clause, location);
+ StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl();
+ jumpTable.offsetTable.add(clause, labels[i]->bind(switchAddress, switchAddress + 3));
}
}
{
SwitchInfo switchInfo = m_switchContextStack.last();
m_switchContextStack.removeLast();
- if (switchInfo.switchType == SwitchInfo::SwitchImmediate) {
- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfImmediateSwitchJumpTables();
- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
-
- SimpleJumpTable& jumpTable = m_codeBlock->addImmediateSwitchJumpTable();
- prepareJumpTableForImmediateSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes, min, max);
- } else if (switchInfo.switchType == SwitchInfo::SwitchCharacter) {
- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfCharacterSwitchJumpTables();
- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
+
+ switch (switchInfo.switchType) {
+ case SwitchInfo::SwitchImmediate:
+ case SwitchInfo::SwitchCharacter: {
+ instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfSwitchJumpTables();
+ instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
+
+ UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->addSwitchJumpTable();
+ prepareJumpTableForSwitch(
+ jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max,
+ switchInfo.switchType == SwitchInfo::SwitchImmediate
+ ? keyForImmediateSwitch
+ : keyForCharacterSwitch);
+ break;
+ }
- SimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable();
- prepareJumpTableForCharacterSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes, min, max);
- } else {
- ASSERT(switchInfo.switchType == SwitchInfo::SwitchString);
+ case SwitchInfo::SwitchString: {
instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables();
- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
+ instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
- StringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
- prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes);
+ UnlinkedStringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
+ prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
}
// And we could make the caller pass the node pointer in, if there was some way of getting
// that from an arbitrary node. However, calling emitExpressionInfo without any useful data
// is still good enough to get us an accurate line number.
- emitExpressionInfo(0, 0, 0);
- RegisterID* exception = emitNewError(newTemporary(), SyntaxError, jsString(globalData(), "Expression too deep"));
- emitThrow(exception);
- return exception;
+ m_expressionTooDeep = true;
+ return newTemporary();
+}
+
+bool BytecodeGenerator::isArgumentNumber(const Identifier& ident, int argumentNumber) // True iff ident is a local whose register is exactly the slot of the given argument.
+{
+ RegisterID* registerID = variable(ident).local(); // Null when ident is not a local (e.g. resolved through a scope).
+ if (!registerID)
+ return false;
+ return registerID->index() == CallFrame::argumentOffset(argumentNumber);
+}
+
+void BytecodeGenerator::emitReadOnlyExceptionIfNeeded() // Strict mode: throw on write to a read-only property; sloppy mode: emit nothing (silent no-op).
+{
+ if (!isStrictMode())
+ return;
+ emitOpcode(op_throw_static_error);
+ instructions().append(addConstantValue(addStringConstant(Identifier::fromString(m_vm, StrictModeReadonlyPropertyWriteError)))->index()); // Message is interned once as a string constant.
+ instructions().append(false); // NOTE(review): second operand of op_throw_static_error — presumably selects the error class; confirm against the opcode's definition.
+}
+
+void BytecodeGenerator::emitEnumeration(ThrowableExpressionData* node, ExpressionNode* subjectNode, const std::function<void(BytecodeGenerator&, RegisterID*)>& callBack) // Emits a full for-of skeleton over subjectNode's iterator, invoking callBack for each value.
+{
+ RefPtr<RegisterID> subject = newTemporary();
+ emitNode(subject.get(), subjectNode);
+ RefPtr<RegisterID> iterator = emitGetById(newTemporary(), subject.get(), propertyNames().iteratorSymbol); // Fetch subject[Symbol.iterator]...
+ {
+ CallArguments args(*this, nullptr);
+ emitMove(args.thisRegister(), subject.get());
+ emitCall(iterator.get(), iterator.get(), NoExpectedFunction, args, node->divot(), node->divotStart(), node->divotEnd()); // ...then call it with subject as |this| to obtain the iterator.
+ }
+
+ RefPtr<Label> loopDone = newLabel();
+ // RefPtr<Register> iterator's lifetime must be longer than IteratorCloseContext.
+ pushIteratorCloseContext(iterator.get(), node);
+ {
+ LabelScopePtr scope = newLabelScope(LabelScope::Loop);
+ RefPtr<RegisterID> value = newTemporary();
+ emitLoad(value.get(), jsUndefined());
+
+ emitJump(scope->continueTarget());
+
+ RefPtr<Label> loopStart = newLabel();
+ emitLabel(loopStart.get());
+ emitLoopHint();
+
+ RefPtr<Label> tryStartLabel = newLabel();
+ emitLabel(tryStartLabel.get());
+ TryData* tryData = pushTry(tryStartLabel.get()); // Guard the body so an exception still triggers IteratorClose below.
+ callBack(*this, value.get());
+ emitJump(scope->continueTarget());
+
+ // IteratorClose sequence for throw-ed control flow.
+ {
+ RefPtr<Label> catchHere = emitLabel(newLabel().get());
+ RefPtr<RegisterID> exceptionRegister = newTemporary();
+ RefPtr<RegisterID> thrownValueRegister = newTemporary();
+ popTryAndEmitCatch(tryData, exceptionRegister.get(),
+ thrownValueRegister.get(), catchHere.get(), HandlerType::SynthesizedFinally);
+
+ RefPtr<Label> catchDone = newLabel();
+
+ RefPtr<RegisterID> returnMethod = emitGetById(newTemporary(), iterator.get(), propertyNames().returnKeyword);
+ emitJumpIfTrue(emitIsUndefined(newTemporary(), returnMethod.get()), catchDone.get()); // No iterator.return => nothing to close; rethrow directly.
+
+ RefPtr<Label> returnCallTryStart = newLabel();
+ emitLabel(returnCallTryStart.get());
+ TryData* returnCallTryData = pushTry(returnCallTryStart.get());
+
+ CallArguments returnArguments(*this, nullptr);
+ emitMove(returnArguments.thisRegister(), iterator.get());
+ emitCall(value.get(), returnMethod.get(), NoExpectedFunction, returnArguments, node->divot(), node->divotStart(), node->divotEnd()); // iterator.return()'s result is ignored on this path.
+
+ emitLabel(catchDone.get());
+ emitThrow(exceptionRegister.get()); // Always rethrow the body's original exception after closing.
+
+ // Absorb exception.
+ popTryAndEmitCatch(returnCallTryData, newTemporary(),
+ newTemporary(), catchDone.get(), HandlerType::SynthesizedFinally);
+ emitThrow(exceptionRegister.get());
+ }
+
+ emitLabel(scope->continueTarget()); // Loop step: value = iterator.next(); exit when result.done is truthy.
+ {
+ emitIteratorNext(value.get(), iterator.get(), node);
+ emitJumpIfTrue(emitGetById(newTemporary(), value.get(), propertyNames().done), loopDone.get()); // Done check happens before unwrapping .value.
+ emitGetById(value.get(), value.get(), propertyNames().value);
+ emitJump(loopStart.get());
+ }
+
+ emitLabel(scope->breakTarget());
+ }
+
+ // IteratorClose sequence for break-ed control flow.
+ popIteratorCloseContext();
+ emitIteratorClose(iterator.get(), node);
+ emitLabel(loopDone.get());
+}
+
+#if ENABLE(ES6_TEMPLATE_LITERAL_SYNTAX)
+RegisterID* BytecodeGenerator::emitGetTemplateObject(RegisterID* dst, TaggedTemplateNode* taggedTemplate) // Builds the tagged-template object by calling the @getTemplateObject private builtin.
+{
+ TemplateRegistryKey::StringVector rawStrings;
+ TemplateRegistryKey::StringVector cookedStrings;
+
+ TemplateStringListNode* templateString = taggedTemplate->templateLiteral()->templateStrings(); // Collect raw/cooked string pairs from every segment of the literal.
+ for (; templateString; templateString = templateString->next()) {
+ rawStrings.append(templateString->value()->raw().impl());
+ cookedStrings.append(templateString->value()->cooked().impl());
+ }
+
+ RefPtr<RegisterID> getTemplateObject = nullptr;
+ Variable var = variable(propertyNames().getTemplateObjectPrivateName); // Private name: not reachable from user code.
+ if (RegisterID* local = var.local())
+ getTemplateObject = emitMove(newTemporary(), local);
+ else {
+ getTemplateObject = newTemporary();
+ RefPtr<RegisterID> scope = newTemporary();
+ moveToDestinationIfNeeded(scope.get(), emitResolveScope(scope.get(), var));
+ emitGetFromScope(getTemplateObject.get(), scope.get(), var, ThrowIfNotFound);
+ }
+
+ CallArguments arguments(*this, nullptr);
+ emitLoad(arguments.thisRegister(), JSValue(addTemplateRegistryKeyConstant(TemplateRegistryKey(rawStrings, cookedStrings)))); // The registry key constant is passed as |this| to the builtin.
+ return emitCall(dst, getTemplateObject.get(), NoExpectedFunction, arguments, taggedTemplate->divot(), taggedTemplate->divotStart(), taggedTemplate->divotEnd());
+}
+#endif
+
+RegisterID* BytecodeGenerator::emitGetEnumerableLength(RegisterID* dst, RegisterID* base) // Emits op_get_enumerable_length with operands (dst, base).
+{
+ emitOpcode(op_get_enumerable_length);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ return dst; // Returned for call chaining, matching the other emit* helpers.
+}
+
+RegisterID* BytecodeGenerator::emitHasGenericProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName) // Emits op_has_generic_property with operands (dst, base, propertyName).
+{
+ emitOpcode(op_has_generic_property);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ instructions().append(propertyName->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitHasIndexedProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName) // Emits op_has_indexed_property; unlike the generic variant it carries an ArrayProfile slot.
+{
+ UnlinkedArrayProfile arrayProfile = newArrayProfile(); // Fresh profile so the JIT can observe base's indexing shape at this site.
+ emitOpcode(op_has_indexed_property);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ instructions().append(propertyName->index());
+ instructions().append(arrayProfile);
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitHasStructureProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName, RegisterID* enumerator) // Emits op_has_structure_property with operands (dst, base, propertyName, enumerator).
+{
+ emitOpcode(op_has_structure_property);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ instructions().append(propertyName->index());
+ instructions().append(enumerator->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitGetPropertyEnumerator(RegisterID* dst, RegisterID* base) // Emits op_get_property_enumerator: dst receives the enumerator used by the op_enumerator_* opcodes below.
+{
+ emitOpcode(op_get_property_enumerator);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitEnumeratorStructurePropertyName(RegisterID* dst, RegisterID* enumerator, RegisterID* index) // Emits op_enumerator_structure_pname with operands (dst, enumerator, index).
+{
+ emitOpcode(op_enumerator_structure_pname);
+ instructions().append(dst->index());
+ instructions().append(enumerator->index());
+ instructions().append(index->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitEnumeratorGenericPropertyName(RegisterID* dst, RegisterID* enumerator, RegisterID* index) // Emits op_enumerator_generic_pname; operand layout mirrors the structure-pname variant above.
+{
+ emitOpcode(op_enumerator_generic_pname);
+ instructions().append(dst->index());
+ instructions().append(enumerator->index());
+ instructions().append(index->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitToIndexString(RegisterID* dst, RegisterID* index) // Emits op_to_index_string with operands (dst, index).
+{
+ emitOpcode(op_to_index_string);
+ instructions().append(dst->index());
+ instructions().append(index->index());
+ return dst;
+}
+
+
+RegisterID* BytecodeGenerator::emitIsObject(RegisterID* dst, RegisterID* src) // Emits op_is_object: dst = is-object test of src; used by the iterator-protocol checks below.
+{
+ emitOpcode(op_is_object);
+ instructions().append(dst->index());
+ instructions().append(src->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitIsUndefined(RegisterID* dst, RegisterID* src) // Emits op_is_undefined: dst = is-undefined test of src.
+{
+ emitOpcode(op_is_undefined);
+ instructions().append(dst->index());
+ instructions().append(src->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitIteratorNext(RegisterID* dst, RegisterID* iterator, const ThrowableExpressionData* node) // dst = iterator.next(); throws TypeError if the result is not an object.
+{
+ {
+ RefPtr<RegisterID> next = emitGetById(newTemporary(), iterator, propertyNames().next); // next is re-fetched on every call; no caching across iterations.
+ CallArguments nextArguments(*this, nullptr);
+ emitMove(nextArguments.thisRegister(), iterator);
+ emitCall(dst, next.get(), NoExpectedFunction, nextArguments, node->divot(), node->divotStart(), node->divotEnd());
+ }
+ {
+ RefPtr<Label> typeIsObject = newLabel();
+ emitJumpIfTrue(emitIsObject(newTemporary(), dst), typeIsObject.get());
+ emitThrowTypeError(ASCIILiteral("Iterator result interface is not an object."));
+ emitLabel(typeIsObject.get());
+ }
+ return dst;
+}
+
+void BytecodeGenerator::emitIteratorClose(RegisterID* iterator, const ThrowableExpressionData* node) // Calls iterator.return() when present; validates that its result is an object.
+{
+ RefPtr<Label> done = newLabel();
+ RefPtr<RegisterID> returnMethod = emitGetById(newTemporary(), iterator, propertyNames().returnKeyword);
+ emitJumpIfTrue(emitIsUndefined(newTemporary(), returnMethod.get()), done.get()); // Missing iterator.return => close is a no-op.
+
+ RefPtr<RegisterID> value = newTemporary();
+ CallArguments returnArguments(*this, nullptr);
+ emitMove(returnArguments.thisRegister(), iterator);
+ emitCall(value.get(), returnMethod.get(), NoExpectedFunction, returnArguments, node->divot(), node->divotStart(), node->divotEnd());
+ emitJumpIfTrue(emitIsObject(newTemporary(), value.get()), done.get()); // Same object-result check as emitIteratorNext.
+ emitThrowTypeError(ASCIILiteral("Iterator result interface is not an object."));
+ emitLabel(done.get());
+}
+
+void BytecodeGenerator::pushIndexedForInScope(RegisterID* localRegister, RegisterID* indexRegister) // Track a for-in over indexed properties; no-op when the loop variable has no local register.
+{
+ if (!localRegister)
+ return;
+ m_forInContextStack.append(std::make_unique<IndexedForInContext>(localRegister, indexRegister));
+}
+
+void BytecodeGenerator::popIndexedForInScope(RegisterID* localRegister) // Mirror of pushIndexedForInScope: pop only if a context was actually pushed (localRegister non-null).
+{
+ if (!localRegister)
+ return;
+ m_forInContextStack.removeLast();
+}
+
+void BytecodeGenerator::pushStructureForInScope(RegisterID* localRegister, RegisterID* indexRegister, RegisterID* propertyRegister, RegisterID* enumeratorRegister) // Track a for-in over structure properties; no-op when the loop variable has no local register.
+{
+ if (!localRegister)
+ return;
+ m_forInContextStack.append(std::make_unique<StructureForInContext>(localRegister, indexRegister, propertyRegister, enumeratorRegister));
+}
+
+void BytecodeGenerator::popStructureForInScope(RegisterID* localRegister) // Mirror of pushStructureForInScope: pop only if a context was actually pushed.
+{
+ if (!localRegister)
+ return;
+ m_forInContextStack.removeLast();
+}
+
+void BytecodeGenerator::invalidateForInContextForLocal(RegisterID* localRegister) // Disable the for-in fast path for a loop variable that may no longer hold the enumerated name.
+{
+ // Lexically invalidating ForInContexts is kind of weak sauce, but it only occurs if
+ // either of the following conditions is true:
+ //
+ // (1) The loop iteration variable is re-assigned within the body of the loop.
+ // (2) The loop iteration variable is captured in the lexical scope of the function.
+ //
+ // These two situations occur sufficiently rarely that it's okay to use this style of
+ // "analysis" to make iteration faster. If we didn't want to do this, we would either have
+ // to perform some flow-sensitive analysis to see if/when the loop iteration variable was
+ // reassigned, or we'd have to resort to runtime checks to see if the variable had been
+ // reassigned from its original value.
+ for (size_t i = m_forInContextStack.size(); i > 0; i--) { // Scan innermost-first; only the nearest context bound to this local is invalidated.
+ ForInContext* context = m_forInContextStack[i - 1].get();
+ if (context->local() != localRegister)
+ continue;
+ context->invalidate();
+ break;
+ }
}
} // namespace JSC