/*
- * Copyright (C) 2008, 2009, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) 2012 Igalia, S.L.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
#include "config.h"
#include "BytecodeGenerator.h"
-#include "BatchedTransitionOptimizer.h"
-#include "JSFunction.h"
#include "Interpreter.h"
+#include "JSActivation.h"
+#include "JSFunction.h"
+#include "JSNameScope.h"
#include "LowLevelInterpreter.h"
-#include "ScopeChain.h"
+#include "JSCInlines.h"
+#include "Options.h"
+#include "StackAlignment.h"
#include "StrongInlines.h"
-#include "UString.h"
+#include "UnlinkedCodeBlock.h"
+#include "UnlinkedInstructionStream.h"
+#include <wtf/StdLibExtras.h>
+#include <wtf/text/WTFString.h>
using namespace std;
namespace JSC {
-/*
- The layout of a register frame looks like this:
-
- For
-
- function f(x, y) {
- var v1;
- function g() { }
- var v2;
- return (x) * (y);
- }
-
- assuming (x) and (y) generated temporaries t1 and t2, you would have
-
- ------------------------------------
- |  x |  y |  g | v2 | v1 | t1 | t2 | <-- value held
- ------------------------------------
- | -5 | -4 | -3 | -2 | -1 | +0 | +1 | <-- register index
- ------------------------------------
- | params->|<-locals      | temps->
-
- Because temporary registers are allocated in a stack-like fashion, we
- can reclaim them with a simple popping algorithm. The same goes for labels.
- (We never reclaim parameter or local registers, because parameters and
- locals are DontDelete.)
-
- The register layout before a function call looks like this:
-
- For
-
- function f(x, y)
- {
- }
-
- f(1);
-
- >                        <------------------------------
- <                        >  reserved: call frame  |  1 | <-- value held
- >         >snip<         <------------------------------
- <                        > +0 | +1 | +2 | +3 | +4 | +5 | <-- register index
- >                        <------------------------------
- |                                              params->|<-locals | temps->
-
- The call instruction fills in the "call frame" registers. It also pads
- missing arguments at the end of the call:
-
- >                        <-----------------------------------
- <                        >  reserved: call frame  |  1 |  ? | <-- value held ("?" stands for "undefined")
- >         >snip<         <-----------------------------------
- <                        > +0 | +1 | +2 | +3 | +4 | +5 | +6 | <-- register index
- >                        <-----------------------------------
- |                                                   params->|<-locals | temps->
-
- After filling in missing arguments, the call instruction sets up the new
- stack frame to overlap the end of the old stack frame:
-
- |---------------------------------->                        <
- |  reserved: call frame  |  1 |  ? <                        > <-- value held ("?" stands for "undefined")
- |---------------------------------->         >snip<         <
- | -7 | -6 | -5 | -4 | -3 | -2 | -1 <                        > <-- register index
- |---------------------------------->                        <
- |                        | params->|<-locals | temps->
-
- That way, arguments are "copied" into the callee's stack frame for free.
-
- If the caller supplies too many arguments, this trick doesn't work. The
- extra arguments protrude into space reserved for locals and temporaries.
- In that case, the call instruction makes a real copy of the call frame header,
- along with just the arguments expected by the callee, leaving the original
- call frame header and arguments behind. (The call instruction can't just discard
- extra arguments, because the "arguments" object may access them later.)
- This copying strategy ensures that all named values will be at the indices
- expected by the callee.
-*/
-
-static bool s_dumpsGeneratedCode = false;
-
void Label::setLocation(unsigned location)
{
m_location = location;
m_generator->m_instructions[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first;
}
-void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode)
-{
- s_dumpsGeneratedCode = dumpsGeneratedCode;
-}
-
-bool BytecodeGenerator::dumpsGeneratedCode()
-{
- return s_dumpsGeneratedCode;
-}
-
-JSObject* BytecodeGenerator::generate()
+ParserError BytecodeGenerator::generate()
{
SamplingRegion samplingRegion("Bytecode Generation");
- m_codeBlock->setThisRegister(m_thisRegister.index());
+ m_codeBlock->setThisRegister(m_thisRegister.virtualRegister());
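+ // Emit binding code for any destructured parameters recorded by the constructor before generating the body itself.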
+ for (size_t i = 0; i < m_deconstructedParameters.size(); i++) {
+ auto& entry = m_deconstructedParameters[i];
+ entry.second->bindValue(*this, entry.first.get());
+ }
m_scopeNode->emitBytecode(*this);
-
- m_codeBlock->instructions() = RefCountedArray<Instruction>(m_instructions);
- if (s_dumpsGeneratedCode)
- m_codeBlock->dump(m_scopeChain->globalObject->globalExec());
+ m_staticPropertyAnalyzer.kill();
- if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode)
- symbolTable().clear();
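+ // Turn the try ranges recorded during generation into exception handler entries on the unlinked code block.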
+ for (unsigned i = 0; i < m_tryRanges.size(); ++i) {
+ TryRange& range = m_tryRanges[i];
+ int start = range.start->bind();
+ int end = range.end->bind();
+
+ // This will happen for empty try blocks and for some cases of finally blocks:
+ //
+ // try {
+ // try {
+ // } finally {
+ // return 42;
+ // // *HERE*
+ // }
+ // } finally {
+ // print("things");
+ // }
+ //
+ // The return will pop scopes to execute the outer finally block. But this includes
+ // popping the try context for the inner try. The try context is live in the fall-through
+ // part of the finally block not because we will emit a handler that overlaps the finally,
+ // but because we haven't yet had a chance to plant the catch target. Then when we finish
+ // emitting code for the outer finally block, we repush the try context, this time with a
+ // new start index. But that means that the start index for the try range corresponding
+ // to the inner-finally-following-the-return (marked as "*HERE*" above) will be greater
+ // than the end index of the try block. This is harmless since end < start handlers will
+ // never get matched in our logic, but we do the runtime a favor and choose to not emit
+ // such handlers at all.
+ if (end <= start)
+ continue;
+
+ ASSERT(range.tryData->targetScopeDepth != UINT_MAX);
+ UnlinkedHandlerInfo info = {
+ static_cast<uint32_t>(start), static_cast<uint32_t>(end),
+ static_cast<uint32_t>(range.tryData->target->bind()),
+ range.tryData->targetScopeDepth
+ };
+ m_codeBlock->addExceptionHandler(info);
+ }
+
+ m_codeBlock->setInstructions(std::make_unique<UnlinkedInstructionStream>(m_instructions));
m_codeBlock->shrinkToFit();
+ if (m_codeBlock->symbolTable())
+ m_codeBlock->setSymbolTable(m_codeBlock->symbolTable()->cloneCapturedNames(*m_codeBlock->vm()));
+
if (m_expressionTooDeep)
- return createOutOfMemoryError(m_scopeChain->globalObject.get());
- return 0;
+ return ParserError(ParserError::OutOfMemory);
+ return ParserError(ParserError::ErrorNone);
}
-bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
+bool BytecodeGenerator::addVar(
+ const Identifier& ident, ConstantMode constantMode, WatchMode watchMode, RegisterID*& r0)
{
- int index = m_calleeRegisters.size();
- SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
- SymbolTable::AddResult result = symbolTable().add(ident.impl(), newEntry);
+ ASSERT(static_cast<size_t>(m_codeBlock->m_numVars) == m_calleeRegisters.size());
+
+ ConcurrentJITLocker locker(symbolTable().m_lock);
+ int index = virtualRegisterForLocal(m_calleeRegisters.size()).offset();
+ SymbolTableEntry newEntry(index, constantMode == IsConstant ? ReadOnly : 0);
+ SymbolTable::Map::AddResult result = symbolTable().add(locker, ident.impl(), newEntry);
if (!result.isNewEntry) {
- r0 = &registerFor(result.iterator->second.getIndex());
+ r0 = &registerFor(result.iterator->value.getIndex());
return false;
}
-
+
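+ // Keep m_watchableVariables index-aligned with the locals by padding with empty identifiers for the non-watchable variables added so far.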
+ if (watchMode == IsWatchable) {
+ while (m_watchableVariables.size() < static_cast<size_t>(m_codeBlock->m_numVars))
+ m_watchableVariables.append(Identifier());
+ m_watchableVariables.append(ident);
+ }
+
r0 = addVar();
+
+ ASSERT(watchMode == NotWatchable || static_cast<size_t>(m_codeBlock->m_numVars) == m_watchableVariables.size());
+
return true;
}
-int BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant)
-{
- int index = symbolTable().size();
- SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
- SymbolTable::AddResult result = symbolTable().add(ident.impl(), newEntry);
- if (!result.isNewEntry)
- index = result.iterator->second.getIndex();
- return index;
-}
-
void BytecodeGenerator::preserveLastVar()
{
if ((m_firstConstantIndex = m_calleeRegisters.size()) != 0)
m_lastVar = &m_calleeRegisters.last();
}
-BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, ScopeChainNode* scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock, CompilationKind compilationKind)
- : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
- , m_shouldEmitProfileHooks(scopeChain->globalObject->globalObjectMethodTable()->supportsProfiling(scopeChain->globalObject.get()))
- , m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get()))
- , m_scopeChain(*scopeChain->globalData, scopeChain)
- , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_symbolTable(0)
, m_scopeNode(programNode)
- , m_codeBlock(codeBlock)
+ , m_codeBlock(vm, codeBlock)
, m_thisRegister(CallFrame::thisArgumentOffset())
+ , m_activationRegister(0)
+ , m_emptyValueRegister(0)
+ , m_globalObjectRegister(0)
, m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(0)
+ , m_localScopeDepth(0)
, m_codeType(GlobalCode)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_hasCreatedActivation(true)
, m_firstLazyFunction(0)
, m_lastLazyFunction(0)
- , m_globalData(scopeChain->globalData)
+ , m_staticPropertyAnalyzer(&m_instructions)
+ , m_vm(&vm)
, m_lastOpcodeID(op_end)
#ifndef NDEBUG
, m_lastOpcodePosition(0)
#endif
- , m_stack(wtfThreadData().stack())
, m_usesExceptions(false)
, m_expressionTooDeep(false)
+ , m_isBuiltinFunction(false)
{
- m_globalData->startedCompiling(m_codeBlock);
- if (m_shouldEmitDebugHooks)
- m_codeBlock->setNeedsFullScopeChain(true);
-
- emitOpcode(op_enter);
- codeBlock->setGlobalData(m_globalData);
-
- // FIXME: Move code that modifies the global object to Interpreter::execute.
-
m_codeBlock->setNumParameters(1); // Allocate space for "this"
- codeBlock->m_numCapturedVars = codeBlock->m_numVars;
-
- if (compilationKind == OptimizingCompilation)
- return;
- JSGlobalObject* globalObject = scopeChain->globalObject.get();
- ExecState* exec = globalObject->globalExec();
-
- BatchedTransitionOptimizer optimizer(*m_globalData, globalObject);
+ emitOpcode(op_enter);
const VarStack& varStack = programNode->varStack();
const FunctionStack& functionStack = programNode->functionStack();
- size_t newGlobals = varStack.size() + functionStack.size();
- if (!newGlobals)
- return;
- globalObject->resizeRegisters(symbolTable->size() + newGlobals);
-
for (size_t i = 0; i < functionStack.size(); ++i) {
FunctionBodyNode* function = functionStack[i];
- globalObject->removeDirect(*m_globalData, function->ident()); // Newly declared functions overwrite existing properties.
-
- JSValue value = JSFunction::create(exec, makeFunction(exec, function), scopeChain);
- int index = addGlobalVar(function->ident(), false);
- globalObject->registerAt(index).set(*m_globalData, globalObject, value);
+ UnlinkedFunctionExecutable* unlinkedFunction = makeFunction(function);
+ codeBlock->addFunctionDeclaration(*m_vm, function->ident(), unlinkedFunction);
}
- for (size_t i = 0; i < varStack.size(); ++i) {
- if (globalObject->hasProperty(exec, *varStack[i].first))
- continue;
- addGlobalVar(*varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant);
- }
+ for (size_t i = 0; i < varStack.size(); ++i)
+ codeBlock->addVariableDeclaration(varStack[i].first, !!(varStack[i].second & DeclarationStacks::IsConstant));
+
}
-BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainNode* scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock, CompilationKind)
- : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
- , m_shouldEmitProfileHooks(scopeChain->globalObject->globalObjectMethodTable()->supportsProfiling(scopeChain->globalObject.get()))
- , m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get()))
- , m_scopeChain(*scopeChain->globalData, scopeChain)
- , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_symbolTable(codeBlock->symbolTable())
, m_scopeNode(functionBody)
- , m_codeBlock(codeBlock)
+ , m_codeBlock(vm, codeBlock)
, m_activationRegister(0)
+ , m_emptyValueRegister(0)
+ , m_globalObjectRegister(0)
, m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(0)
+ , m_localScopeDepth(0)
, m_codeType(FunctionCode)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_hasCreatedActivation(false)
, m_firstLazyFunction(0)
, m_lastLazyFunction(0)
- , m_globalData(scopeChain->globalData)
+ , m_staticPropertyAnalyzer(&m_instructions)
+ , m_vm(&vm)
, m_lastOpcodeID(op_end)
#ifndef NDEBUG
, m_lastOpcodePosition(0)
#endif
- , m_stack(wtfThreadData().stack())
, m_usesExceptions(false)
, m_expressionTooDeep(false)
+ , m_isBuiltinFunction(codeBlock->isBuiltinFunction())
{
- m_globalData->startedCompiling(m_codeBlock);
- if (m_shouldEmitDebugHooks)
- m_codeBlock->setNeedsFullScopeChain(true);
+ if (m_isBuiltinFunction)
+ m_shouldEmitDebugHooks = false;
+
+ m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
+ Vector<Identifier> boundParameterProperties;
+ FunctionParameters& parameters = *functionBody->parameters();
+ for (size_t i = 0; i < parameters.size(); i++) {
+ auto pattern = parameters.at(i);
+ if (pattern->isBindingNode())
+ continue;
+ pattern->collectBoundIdentifiers(boundParameterProperties);
+ continue;
+ }
+ m_symbolTable->setParameterCountIncludingThis(functionBody->parameters()->size() + 1);
- codeBlock->setGlobalData(m_globalData);
-
emitOpcode(op_enter);
- if (m_codeBlock->needsFullScopeChain()) {
+ if (m_codeBlock->needsFullScopeChain() || m_shouldEmitDebugHooks) {
m_activationRegister = addVar();
emitInitLazyRegister(m_activationRegister);
- m_codeBlock->setActivationRegister(m_activationRegister->index());
+ m_codeBlock->setActivationRegister(m_activationRegister->virtualRegister());
}
- // Both op_tear_off_activation and op_tear_off_arguments tear off the 'arguments'
- // object, if created.
- if (m_codeBlock->needsFullScopeChain() || functionBody->usesArguments()) {
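+ // Captured locals occupy a contiguous range of registers; record where that range begins for the symbol table.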
+ m_symbolTable->setCaptureStart(virtualRegisterForLocal(m_codeBlock->m_numVars).offset());
+
+ if (functionBody->usesArguments() || codeBlock->usesEval()) { // May reify arguments object.
RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code.
- RegisterID* argumentsRegister = addVar(propertyNames().arguments, false); // Can be changed by assigning to 'arguments'.
+ RegisterID* argumentsRegister = addVar(propertyNames().arguments, IsVariable, NotWatchable); // Can be changed by assigning to 'arguments'.
// We can save a little space by hard-coding the knowledge that the two
// 'arguments' values are stored in consecutive registers, and storing
// only the index of the assignable one.
- codeBlock->setArgumentsRegister(argumentsRegister->index());
- ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->index() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister()));
+ codeBlock->setArgumentsRegister(argumentsRegister->virtualRegister());
+ ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->virtualRegister() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister()));
emitInitLazyRegister(argumentsRegister);
emitInitLazyRegister(unmodifiedArgumentsRegister);
- if (m_codeBlock->isStrictMode()) {
+ if (shouldTearOffArgumentsEagerly()) {
emitOpcode(op_create_arguments);
instructions().append(argumentsRegister->index());
}
+ }
- // The debugger currently retrieves the arguments object from an activation rather than pulling
- // it from a call frame. In the long-term it should stop doing that (<rdar://problem/6911886>),
- // but for now we force eager creation of the arguments object when debugging.
- if (m_shouldEmitDebugHooks) {
- emitOpcode(op_create_arguments);
- instructions().append(argumentsRegister->index());
+ bool shouldCaptureAllTheThings = m_shouldEmitDebugHooks || codeBlock->usesEval();
+
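+ // Give each captured named parameter (or every parameter, when debugging or eval forces full capture) its own local register instead of leaving it in its argument slot.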
+ bool capturesAnyArgumentByName = false;
+ Vector<RegisterID*, 0, UnsafeVectorOverflow> capturedArguments;
+ if (functionBody->hasCapturedVariables() || shouldCaptureAllTheThings) {
+ FunctionParameters& parameters = *functionBody->parameters();
+ capturedArguments.resize(parameters.size());
+ for (size_t i = 0; i < parameters.size(); ++i) {
+ capturedArguments[i] = 0;
+ auto pattern = parameters.at(i);
+ if (!pattern->isBindingNode())
+ continue;
+ const Identifier& ident = static_cast<const BindingNode*>(pattern)->boundProperty();
+ if (!functionBody->captures(ident) && !shouldCaptureAllTheThings)
+ continue;
+ capturesAnyArgumentByName = true;
+ capturedArguments[i] = addVar();
+ }
+ }
+
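+ // Build a SlowArgument map so that a lazily created arguments object can find captured parameters in their local registers rather than in the argument slots.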
+ if (capturesAnyArgumentByName && !shouldTearOffArgumentsEagerly()) {
+ size_t parameterCount = m_symbolTable->parameterCount();
+ auto slowArguments = std::make_unique<SlowArgument[]>(parameterCount);
+ for (size_t i = 0; i < parameterCount; ++i) {
+ if (!capturedArguments[i]) {
+ ASSERT(slowArguments[i].status == SlowArgument::Normal);
+ slowArguments[i].index = CallFrame::argumentOffset(i);
+ continue;
+ }
+ slowArguments[i].status = SlowArgument::Captured;
+ slowArguments[i].index = capturedArguments[i]->index();
}
+ m_symbolTable->setSlowArguments(WTF::move(slowArguments));
}
+ RegisterID* calleeRegister = resolveCallee(functionBody); // May push to the scope chain and/or add a captured var.
+
const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack();
const DeclarationStacks::VarStack& varStack = functionBody->varStack();
+ IdentifierSet test;
// Captured variables and functions go first so that activations don't have
// to step over the non-captured locals to mark them.
- m_hasCreatedActivation = false;
if (functionBody->hasCapturedVariables()) {
+ for (size_t i = 0; i < boundParameterProperties.size(); i++) {
+ const Identifier& ident = boundParameterProperties[i];
+ if (functionBody->captures(ident))
+ addVar(ident, IsVariable, IsWatchable);
+ }
for (size_t i = 0; i < functionStack.size(); ++i) {
FunctionBodyNode* function = functionStack[i];
const Identifier& ident = function->ident();
if (functionBody->captures(ident)) {
- if (!m_hasCreatedActivation) {
- m_hasCreatedActivation = true;
- emitOpcode(op_create_activation);
- instructions().append(m_activationRegister->index());
- }
m_functions.add(ident.impl());
- emitNewFunction(addVar(ident, false), function);
+ emitNewFunction(addVar(ident, IsVariable, IsWatchable), IsCaptured, function);
}
}
for (size_t i = 0; i < varStack.size(); ++i) {
- const Identifier& ident = *varStack[i].first;
+ const Identifier& ident = varStack[i].first;
if (functionBody->captures(ident))
- addVar(ident, varStack[i].second & DeclarationStacks::IsConstant);
+ addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, IsWatchable);
}
}
- bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks;
- if (!canLazilyCreateFunctions && !m_hasCreatedActivation) {
- m_hasCreatedActivation = true;
- emitOpcode(op_create_activation);
- instructions().append(m_activationRegister->index());
- }
- codeBlock->m_numCapturedVars = codeBlock->m_numVars;
+ m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset());
+
+ bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks;
m_firstLazyFunction = codeBlock->m_numVars;
for (size_t i = 0; i < functionStack.size(); ++i) {
FunctionBodyNode* function = functionStack[i];
const Identifier& ident = function->ident();
if (!functionBody->captures(ident)) {
m_functions.add(ident.impl());
- RefPtr<RegisterID> reg = addVar(ident, false);
+ RefPtr<RegisterID> reg = addVar(ident, IsVariable, NotWatchable);
// Don't lazily create functions that override the name 'arguments'
// as this would complicate lazy instantiation of actual arguments.
if (!canLazilyCreateFunctions || ident == propertyNames().arguments)
- emitNewFunction(reg.get(), function);
+ emitNewFunction(reg.get(), NotCaptured, function);
else {
emitInitLazyRegister(reg.get());
- m_lazyFunctions.set(reg->index(), function);
+ m_lazyFunctions.set(reg->virtualRegister().toLocal(), function);
}
}
}
m_lastLazyFunction = canLazilyCreateFunctions ? codeBlock->m_numVars : m_firstLazyFunction;
+ for (size_t i = 0; i < boundParameterProperties.size(); i++) {
+ const Identifier& ident = boundParameterProperties[i];
+ if (!functionBody->captures(ident))
+ addVar(ident, IsVariable, IsWatchable);
+ }
for (size_t i = 0; i < varStack.size(); ++i) {
- const Identifier& ident = *varStack[i].first;
+ const Identifier& ident = varStack[i].first;
if (!functionBody->captures(ident))
- addVar(ident, varStack[i].second & DeclarationStacks::IsConstant);
+ addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, NotWatchable);
}
- if (m_shouldEmitDebugHooks)
- codeBlock->m_numCapturedVars = codeBlock->m_numVars;
+ if (shouldCaptureAllTheThings)
+ m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset());
- FunctionParameters& parameters = *functionBody->parameters();
+ if (m_symbolTable->captureCount())
+ emitOpcode(op_touch_entry);
+
m_parameters.grow(parameters.size() + 1); // reserve space for "this"
// Add "this" as a parameter
int nextParameterIndex = CallFrame::thisArgumentOffset();
- m_thisRegister.setIndex(nextParameterIndex--);
+ m_thisRegister.setIndex(nextParameterIndex++);
m_codeBlock->addParameter();
-
- for (size_t i = 0; i < parameters.size(); ++i)
- addParameter(parameters[i], nextParameterIndex--);
-
+ for (size_t i = 0; i < parameters.size(); ++i, ++nextParameterIndex) {
+ int index = nextParameterIndex;
+ auto pattern = parameters.at(i);
+ if (!pattern->isBindingNode()) {
+ m_codeBlock->addParameter();
+ RegisterID& parameter = registerFor(index);
+ parameter.setIndex(index);
+ m_deconstructedParameters.append(std::make_pair(&parameter, pattern));
+ continue;
+ }
+ auto simpleParameter = static_cast<const BindingNode*>(pattern);
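+ // A captured parameter lives in a local register: copy the incoming argument there and record that register as the parameter's home.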
+ if (capturedArguments.size() && capturedArguments[i]) {
+ ASSERT((functionBody->hasCapturedVariables() && functionBody->captures(simpleParameter->boundProperty())) || shouldCaptureAllTheThings);
+ index = capturedArguments[i]->index();
+ RegisterID original(nextParameterIndex);
+ emitMove(capturedArguments[i], &original);
+ }
+ addParameter(simpleParameter->boundProperty(), index);
+ }
preserveLastVar();
- if (isConstructor()) {
- RefPtr<RegisterID> func = newTemporary();
- RefPtr<RegisterID> funcProto = newTemporary();
-
- emitOpcode(op_get_callee);
- instructions().append(func->index());
- // Load prototype.
- emitGetById(funcProto.get(), func.get(), globalData()->propertyNames->prototype);
+ // We declare the callee's name last because it should lose to a var, function, and/or parameter declaration.
+ addCallee(functionBody, calleeRegister);
- emitOpcode(op_create_this);
- instructions().append(m_thisRegister.index());
- instructions().append(funcProto->index());
- } else if (!codeBlock->isStrictMode() && (functionBody->usesThis() || codeBlock->usesEval() || m_shouldEmitDebugHooks)) {
- emitOpcode(op_convert_this);
- instructions().append(m_thisRegister.index());
+ if (isConstructor()) {
+ emitCreateThis(&m_thisRegister);
+ } else if (functionBody->usesThis() || codeBlock->usesEval()) {
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+ emitOpcode(op_to_this);
+ instructions().append(kill(&m_thisRegister));
+ instructions().append(0);
}
}
-BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, ScopeChainNode* scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock, CompilationKind)
- : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
- , m_shouldEmitProfileHooks(scopeChain->globalObject->globalObjectMethodTable()->supportsProfiling(scopeChain->globalObject.get()))
- , m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get()))
- , m_scopeChain(*scopeChain->globalData, scopeChain)
- , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_symbolTable(codeBlock->symbolTable())
, m_scopeNode(evalNode)
- , m_codeBlock(codeBlock)
+ , m_codeBlock(vm, codeBlock)
, m_thisRegister(CallFrame::thisArgumentOffset())
+ , m_activationRegister(0)
+ , m_emptyValueRegister(0)
+ , m_globalObjectRegister(0)
, m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(codeBlock->baseScopeDepth())
+ , m_localScopeDepth(0)
, m_codeType(EvalCode)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_hasCreatedActivation(true)
, m_firstLazyFunction(0)
, m_lastLazyFunction(0)
- , m_globalData(scopeChain->globalData)
+ , m_staticPropertyAnalyzer(&m_instructions)
+ , m_vm(&vm)
, m_lastOpcodeID(op_end)
#ifndef NDEBUG
, m_lastOpcodePosition(0)
#endif
- , m_stack(wtfThreadData().stack())
, m_usesExceptions(false)
, m_expressionTooDeep(false)
+ , m_isBuiltinFunction(false)
{
- m_globalData->startedCompiling(m_codeBlock);
- if (m_shouldEmitDebugHooks || m_baseScopeDepth)
- m_codeBlock->setNeedsFullScopeChain(true);
+ m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
+ m_codeBlock->setNumParameters(1);
emitOpcode(op_enter);
- codeBlock->setGlobalData(m_globalData);
- m_codeBlock->setNumParameters(1);
const DeclarationStacks::FunctionStack& functionStack = evalNode->functionStack();
for (size_t i = 0; i < functionStack.size(); ++i)
- m_codeBlock->addFunctionDecl(makeFunction(m_globalData, functionStack[i]));
+ m_codeBlock->addFunctionDecl(makeFunction(functionStack[i]));
const DeclarationStacks::VarStack& varStack = evalNode->varStack();
unsigned numVariables = varStack.size();
- Vector<Identifier> variables;
+ Vector<Identifier, 0, UnsafeVectorOverflow> variables;
variables.reserveCapacity(numVariables);
- for (size_t i = 0; i < numVariables; ++i)
- variables.append(*varStack[i].first);
+ for (size_t i = 0; i < numVariables; ++i) {
+ ASSERT(varStack[i].first.impl()->isAtomic());
+ variables.append(varStack[i].first);
+ }
codeBlock->adoptVariables(variables);
- codeBlock->m_numCapturedVars = codeBlock->m_numVars;
preserveLastVar();
}
BytecodeGenerator::~BytecodeGenerator()
{
- m_globalData->finishedCompiling(m_codeBlock);
}
RegisterID* BytecodeGenerator::emitInitLazyRegister(RegisterID* reg)
{
emitOpcode(op_init_lazy_reg);
instructions().append(reg->index());
+ ASSERT(!hasWatchableVariable(reg->index()));
return reg;
}
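+// Gives a named function expression access to its own name from inside its body, e.g.
+//
+//     var f = function g() { return g; };
+//
+// Returns 0 if the name is not visible in the body, or if it would have to live in a dynamic name scope.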
+RegisterID* BytecodeGenerator::resolveCallee(FunctionBodyNode* functionBodyNode)
+{
+ if (!functionNameIsInScope(functionBodyNode->ident(), functionBodyNode->functionMode()))
+ return 0;
+
+ if (functionNameScopeIsDynamic(m_codeBlock->usesEval(), m_codeBlock->isStrictMode()))
+ return 0;
+
+ m_calleeRegister.setIndex(JSStack::Callee);
+ if (functionBodyNode->captures(functionBodyNode->ident()))
+ return emitMove(addVar(), IsCaptured, &m_calleeRegister);
+
+ return &m_calleeRegister;
+}
+
+void BytecodeGenerator::addCallee(FunctionBodyNode* functionBodyNode, RegisterID* calleeRegister)
+{
+ if (!calleeRegister)
+ return;
+
+ symbolTable().add(functionBodyNode->ident().impl(), SymbolTableEntry(calleeRegister->index(), ReadOnly));
+}
+
void BytecodeGenerator::addParameter(const Identifier& ident, int parameterIndex)
{
// Parameters overwrite var declarations, but not function declarations.
m_codeBlock->addParameter();
}
-RegisterID* BytecodeGenerator::registerFor(const Identifier& ident)
-{
- if (ident == propertyNames().thisIdentifier)
- return &m_thisRegister;
-
- if (m_codeType == GlobalCode)
- return 0;
-
- if (!shouldOptimizeLocals())
- return 0;
-
- SymbolTableEntry entry = symbolTable().get(ident.impl());
- if (entry.isNull())
- return 0;
-
- if (ident == propertyNames().arguments)
- createArgumentsIfNecessary();
-
- return createLazyRegisterIfNecessary(&registerFor(entry.getIndex()));
-}
-
-RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
-{
- if (m_codeType == EvalCode)
- return 0;
-
- if (m_codeType == GlobalCode)
- return 0;
-
- SymbolTableEntry entry = symbolTable().get(ident.impl());
- if (entry.isNull())
- return 0;
-
- return createLazyRegisterIfNecessary(&registerFor(entry.getIndex()));
-}
-
bool BytecodeGenerator::willResolveToArguments(const Identifier& ident)
{
if (ident != propertyNames().arguments)
SymbolTableEntry entry = symbolTable().get(ident.impl());
if (entry.isNull())
return false;
-
+
if (m_codeBlock->usesArguments() && m_codeType == FunctionCode)
return true;
RegisterID* BytecodeGenerator::createLazyRegisterIfNecessary(RegisterID* reg)
{
- if (m_lastLazyFunction <= reg->index() || reg->index() < m_firstLazyFunction)
+ if (!reg->virtualRegister().isLocal())
return reg;
- emitLazyNewFunction(reg, m_lazyFunctions.get(reg->index()));
- return reg;
-}
-bool BytecodeGenerator::isLocal(const Identifier& ident)
-{
- if (ident == propertyNames().thisIdentifier)
- return true;
-
- return shouldOptimizeLocals() && symbolTable().contains(ident.impl());
-}
+ int localVariableNumber = reg->virtualRegister().toLocal();
-bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
-{
- return symbolTable().get(ident.impl()).isReadOnly();
+ if (m_lastLazyFunction <= localVariableNumber || localVariableNumber < m_firstLazyFunction)
+ return reg;
+ emitLazyNewFunction(reg, m_lazyFunctions.get(localVariableNumber));
+ return reg;
}
RegisterID* BytecodeGenerator::newRegister()
{
- m_calleeRegisters.append(m_calleeRegisters.size());
- m_codeBlock->m_numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+ m_calleeRegisters.append(virtualRegisterForLocal(m_calleeRegisters.size()));
+ int numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
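+ // Round the frame size up so the callee register area respects the stack alignment.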
+ numCalleeRegisters = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numCalleeRegisters);
+ m_codeBlock->m_numCalleeRegisters = numCalleeRegisters;
return &m_calleeRegisters.last();
}
return result;
}
-RegisterID* BytecodeGenerator::highestUsedRegister()
-{
- size_t count = m_codeBlock->m_numCalleeRegisters;
- while (m_calleeRegisters.size() < count)
- newRegister();
- return &m_calleeRegisters.last();
-}
-
-PassRefPtr<LabelScope> BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
+LabelScopePtr BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
{
// Reclaim free label scopes.
while (m_labelScopes.size() && !m_labelScopes.last().refCount())
// Allocate new label scope.
LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : PassRefPtr<Label>()); // Only loops have continue targets.
m_labelScopes.append(scope);
- return &m_labelScopes.last();
+ return LabelScopePtr(m_labelScopes, m_labelScopes.size() - 1);
}
PassRefPtr<Label> BytecodeGenerator::newLabel()
ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end);
m_lastOpcodePosition = opcodePosition;
#endif
- instructions().append(globalData()->interpreter->getOpcode(opcodeID));
+ instructions().append(opcodeID);
m_lastOpcodeID = opcodeID;
}
-ValueProfile* BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)
+UnlinkedArrayProfile BytecodeGenerator::newArrayProfile()
{
-#if ENABLE(VALUE_PROFILER)
- ValueProfile* result = m_codeBlock->addValueProfile(instructions().size());
-#else
- ValueProfile* result = 0;
-#endif
+ return m_codeBlock->addArrayProfile();
+}
+
+UnlinkedArrayAllocationProfile BytecodeGenerator::newArrayAllocationProfile()
+{
+ return m_codeBlock->addArrayAllocationProfile();
+}
+
+UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile()
+{
+ return m_codeBlock->addObjectAllocationProfile();
+}
+
+UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)
+{
+ UnlinkedValueProfile result = m_codeBlock->addValueProfile();
emitOpcode(opcodeID);
return result;
}
void BytecodeGenerator::emitLoopHint()
{
-#if ENABLE(DFG_JIT)
emitOpcode(op_loop_hint);
-#endif
}
void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index)
PassRefPtr<Label> BytecodeGenerator::emitJump(Label* target)
{
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jmp : op_loop);
+ emitOpcode(op_jmp);
instructions().append(target->bind(begin, instructions().size()));
return target;
}
rewindBinaryOp();
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jless : op_loop_if_less);
+ emitOpcode(op_jless);
instructions().append(src1Index);
instructions().append(src2Index);
instructions().append(target->bind(begin, instructions().size()));
rewindBinaryOp();
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jlesseq : op_loop_if_lesseq);
+ emitOpcode(op_jlesseq);
instructions().append(src1Index);
instructions().append(src2Index);
instructions().append(target->bind(begin, instructions().size()));
rewindBinaryOp();
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jgreater : op_loop_if_greater);
+ emitOpcode(op_jgreater);
instructions().append(src1Index);
instructions().append(src2Index);
instructions().append(target->bind(begin, instructions().size()));
rewindBinaryOp();
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jgreatereq : op_loop_if_greatereq);
+ emitOpcode(op_jgreatereq);
instructions().append(src1Index);
instructions().append(src2Index);
instructions().append(target->bind(begin, instructions().size()));
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
+ emitOpcode(op_jtrue);
instructions().append(cond->index());
instructions().append(target->bind(begin, instructions().size()));
return target;
rewindUnaryOp();
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
+ emitOpcode(op_jtrue);
instructions().append(srcIndex);
instructions().append(target->bind(begin, instructions().size()));
return target;
}
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jfalse : op_loop_if_false);
+ emitOpcode(op_jfalse);
instructions().append(cond->index());
instructions().append(target->bind(begin, instructions().size()));
return target;
emitOpcode(op_jneq_ptr);
instructions().append(cond->index());
- instructions().append(Instruction(*m_globalData, m_codeBlock->ownerExecutable(), m_scopeChain->globalObject->callFunction()));
+ instructions().append(Special::CallFunction);
instructions().append(target->bind(begin, instructions().size()));
return target;
}
emitOpcode(op_jneq_ptr);
instructions().append(cond->index());
- instructions().append(Instruction(*m_globalData, m_codeBlock->ownerExecutable(), m_scopeChain->globalObject->applyFunction()));
+ instructions().append(Special::ApplyFunction);
instructions().append(target->bind(begin, instructions().size()));
return target;
}
StringImpl* rep = ident.impl();
IdentifierMap::AddResult result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
if (result.isNewEntry)
- m_codeBlock->addIdentifier(Identifier(m_globalData, rep));
+ m_codeBlock->addIdentifier(ident);
+
+ return result.iterator->value;
+}
- return result.iterator->second;
+// We can't hash JSValue(), so we use a dedicated data member to cache it.
+RegisterID* BytecodeGenerator::addConstantEmptyValue()
+{
+ if (!m_emptyValueRegister) {
+ int index = m_nextConstantOffset;
+ m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+ ++m_nextConstantOffset;
+ m_codeBlock->addConstant(JSValue());
+ m_emptyValueRegister = &m_constantPoolRegisters[index];
+ }
+
+ return m_emptyValueRegister;
}
RegisterID* BytecodeGenerator::addConstantValue(JSValue v)
{
- int index = m_nextConstantOffset;
+ if (!v)
+ return addConstantEmptyValue();
+ int index = m_nextConstantOffset;
JSValueMap::AddResult result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset);
if (result.isNewEntry) {
m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
++m_nextConstantOffset;
- m_codeBlock->addConstant(JSValue(v));
+ m_codeBlock->addConstant(v);
} else
- index = result.iterator->second;
-
+ index = result.iterator->value;
return &m_constantPoolRegisters[index];
}
return m_codeBlock->addRegExp(r);
}
-RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
+RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, CaptureMode captureMode, RegisterID* src)
{
- emitOpcode(op_mov);
+ m_staticPropertyAnalyzer.mov(dst->index(), src->index());
+
+ emitOpcode(captureMode == IsCaptured ? op_captured_mov : op_mov);
instructions().append(dst->index());
instructions().append(src->index());
+ if (captureMode == IsCaptured)
+ instructions().append(watchableVariable(dst->index()));
return dst;
}
+RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
+{
+ return emitMove(dst, captureMode(dst->index()), src);
+}
+
RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src)
{
emitOpcode(opcodeID);
return dst;
}
-RegisterID* BytecodeGenerator::emitPreInc(RegisterID* srcDst)
+RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst)
{
- emitOpcode(op_pre_inc);
+ emitOpcode(op_inc);
instructions().append(srcDst->index());
return srcDst;
}
-RegisterID* BytecodeGenerator::emitPreDec(RegisterID* srcDst)
+RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst)
{
- emitOpcode(op_pre_dec);
+ emitOpcode(op_dec);
instructions().append(srcDst->index());
return srcDst;
}
-RegisterID* BytecodeGenerator::emitPostInc(RegisterID* dst, RegisterID* srcDst)
-{
- emitOpcode(op_post_inc);
- instructions().append(dst->index());
- instructions().append(srcDst->index());
- return dst;
-}
-
-RegisterID* BytecodeGenerator::emitPostDec(RegisterID* dst, RegisterID* srcDst)
-{
- emitOpcode(op_post_dec);
- instructions().append(dst->index());
- instructions().append(srcDst->index());
- return dst;
-}
-
RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types)
{
emitOpcode(opcodeID);
&& src1->isTemporary()
&& m_codeBlock->isConstantRegisterIndex(src2->index())
&& m_codeBlock->constantRegister(src2->index()).get().isString()) {
- const UString& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue();
+ const String& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue();
if (value == "undefined") {
rewindUnaryOp();
emitOpcode(op_is_undefined);
// FIXME: Our hash tables won't hold infinity, so we make a new JSValue each time.
// Later we can do the extra work to handle that like the other cases. They also don't
// work correctly with NaN as a key.
- if (isnan(number) || number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
+ if (std::isnan(number) || number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
return emitLoad(dst, jsNumber(number));
- JSValue& valueInMap = m_numberMap.add(number, JSValue()).iterator->second;
+ JSValue& valueInMap = m_numberMap.add(number, JSValue()).iterator->value;
if (!valueInMap)
valueInMap = jsNumber(number);
return emitLoad(dst, valueInMap);
RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, const Identifier& identifier)
{
- JSString*& stringInMap = m_stringMap.add(identifier.impl(), 0).iterator->second;
+ JSString*& stringInMap = m_stringMap.add(identifier.impl(), nullptr).iterator->value;
if (!stringInMap)
- stringInMap = jsOwnedString(globalData(), identifier.ustring());
+ stringInMap = jsOwnedString(vm(), identifier.string());
return emitLoad(dst, JSValue(stringInMap));
}
return constantID;
}
-bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& index, size_t& stackDepth, bool forWriting, bool& requiresDynamicChecks, JSObject*& globalObject)
+RegisterID* BytecodeGenerator::emitLoadGlobalObject(RegisterID* dst)
{
- // Cases where we cannot statically optimize the lookup.
- if (property == propertyNames().arguments || !canOptimizeNonLocals()) {
- stackDepth = 0;
- index = missingSymbolMarker();
-
- if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
- ScopeChainIterator iter = m_scopeChain->begin();
- globalObject = iter->get();
- ASSERT((++iter) == m_scopeChain->end());
- }
- return false;
- }
-
- size_t depth = 0;
- requiresDynamicChecks = false;
- ScopeChainIterator iter = m_scopeChain->begin();
- ScopeChainIterator end = m_scopeChain->end();
- for (; iter != end; ++iter, ++depth) {
- JSObject* currentScope = iter->get();
- if (!currentScope->isVariableObject())
- break;
- JSVariableObject* currentVariableObject = jsCast<JSVariableObject*>(currentScope);
- SymbolTableEntry entry = currentVariableObject->symbolTable().get(property.impl());
-
- // Found the property
- if (!entry.isNull()) {
- if (entry.isReadOnly() && forWriting) {
- stackDepth = 0;
- index = missingSymbolMarker();
- if (++iter == end)
- globalObject = currentVariableObject;
- return false;
- }
- stackDepth = depth + m_codeBlock->needsFullScopeChain();
- index = entry.getIndex();
- if (++iter == end)
- globalObject = currentVariableObject;
- return true;
- }
- bool scopeRequiresDynamicChecks = false;
- if (currentVariableObject->isDynamicScope(scopeRequiresDynamicChecks))
- break;
- requiresDynamicChecks |= scopeRequiresDynamicChecks;
+ if (!m_globalObjectRegister) {
+ int index = m_nextConstantOffset;
+ m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+ ++m_nextConstantOffset;
+ m_codeBlock->addConstant(JSValue());
+ m_globalObjectRegister = &m_constantPoolRegisters[index];
+ m_codeBlock->setGlobalObjectRegister(VirtualRegister(index));
}
- // Can't locate the property but we're able to avoid a few lookups.
- stackDepth = depth + m_codeBlock->needsFullScopeChain();
- index = missingSymbolMarker();
- JSObject* scope = iter->get();
- if (++iter == end)
- globalObject = scope;
- return true;
-}
-
-void BytecodeGenerator::emitCheckHasInstance(RegisterID* base)
-{
- emitOpcode(op_check_has_instance);
- instructions().append(base->index());
-}
-
-RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype)
-{
- emitOpcode(op_instanceof);
- instructions().append(dst->index());
- instructions().append(value->index());
- instructions().append(base->index());
- instructions().append(basePrototype->index());
- return dst;
+ if (dst)
+ emitMove(dst, m_globalObjectRegister);
+ return m_globalObjectRegister;
}
-static const unsigned maxGlobalResolves = 128;
-
-bool BytecodeGenerator::shouldAvoidResolveGlobal()
+bool BytecodeGenerator::isCaptured(int operand)
{
- return m_codeBlock->globalResolveInfoCount() > maxGlobalResolves && !m_labelScopes.size();
+ return m_symbolTable && m_symbolTable->isCaptured(operand);
}
-RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const Identifier& property)
+Local BytecodeGenerator::local(const Identifier& property)
{
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- if (!findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject) && !globalObject) {
- // We can't optimise at all :-(
- ValueProfile* profile = emitProfiledOpcode(op_resolve);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(profile);
- return dst;
- }
- if (shouldAvoidResolveGlobal()) {
- globalObject = 0;
- requiresDynamicChecks = true;
- }
-
- if (globalObject) {
- if (index != missingSymbolMarker() && !requiresDynamicChecks) {
- // Directly index the property lookup across multiple scopes.
- return emitGetScopedVar(dst, depth, index, globalObject);
- }
-
-#if ENABLE(JIT)
- m_codeBlock->addGlobalResolveInfo(instructions().size());
-#endif
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
- ValueProfile* profile = emitProfiledOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(0);
- instructions().append(0);
- if (requiresDynamicChecks)
- instructions().append(depth);
- instructions().append(profile);
- return dst;
- }
+ if (property == propertyNames().thisIdentifier)
+ return Local(thisRegister(), ReadOnly, NotCaptured);
+
+ if (property == propertyNames().arguments)
+ createArgumentsIfNecessary();
- if (requiresDynamicChecks) {
- // If we get here we have eval nested inside a |with| just give up
- ValueProfile* profile = emitProfiledOpcode(op_resolve);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(profile);
- return dst;
- }
+ if (!shouldOptimizeLocals())
+ return Local();
- if (index != missingSymbolMarker()) {
- // Directly index the property lookup across multiple scopes.
- return emitGetScopedVar(dst, depth, index, globalObject);
- }
+ SymbolTableEntry entry = symbolTable().get(property.impl());
+ if (entry.isNull())
+ return Local();
- // In this case we are at least able to drop a few scope chains from the
- // lookup chain, although we still need to hash from then on.
- ValueProfile* profile = emitProfiledOpcode(op_resolve_skip);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(depth);
- instructions().append(profile);
- return dst;
+ RegisterID* local = createLazyRegisterIfNecessary(&registerFor(entry.getIndex()));
+ return Local(local, entry.getAttributes(), captureMode(local->index()));
}
-RegisterID* BytecodeGenerator::emitGetScopedVar(RegisterID* dst, size_t depth, int index, JSValue globalObject)
+Local BytecodeGenerator::constLocal(const Identifier& property)
{
- if (globalObject) {
- if (m_lastOpcodeID == op_put_global_var) {
- int dstIndex;
- int srcIndex;
- retrieveLastUnaryOp(dstIndex, srcIndex);
-
- if (dstIndex == index && srcIndex == dst->index())
- return dst;
- }
+ if (m_codeType != FunctionCode)
+ return Local();
- ValueProfile* profile = emitProfiledOpcode(op_get_global_var);
- instructions().append(dst->index());
- instructions().append(index);
- instructions().append(profile);
- return dst;
- }
+ SymbolTableEntry entry = symbolTable().get(property.impl());
+ if (entry.isNull())
+ return Local();
- ValueProfile* profile = emitProfiledOpcode(op_get_scoped_var);
- instructions().append(dst->index());
- instructions().append(index);
- instructions().append(depth);
- instructions().append(profile);
- return dst;
+ RegisterID* local = createLazyRegisterIfNecessary(&registerFor(entry.getIndex()));
+ return Local(local, entry.getAttributes(), captureMode(local->index()));
}
-RegisterID* BytecodeGenerator::emitPutScopedVar(size_t depth, int index, RegisterID* value, JSValue globalObject)
+void BytecodeGenerator::emitCheckHasInstance(RegisterID* dst, RegisterID* value, RegisterID* base, Label* target)
{
- if (globalObject) {
- emitOpcode(op_put_global_var);
- instructions().append(index);
- instructions().append(value->index());
- return value;
- }
- emitOpcode(op_put_scoped_var);
- instructions().append(index);
- instructions().append(depth);
+ size_t begin = instructions().size();
+ emitOpcode(op_check_has_instance);
+ instructions().append(dst->index());
instructions().append(value->index());
- return value;
+ instructions().append(base->index());
+ instructions().append(target->bind(begin, instructions().size()));
}
-RegisterID* BytecodeGenerator::emitResolveBase(RegisterID* dst, const Identifier& property)
-{
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject);
- if (!globalObject || requiresDynamicChecks) {
- // We can't optimise at all :-(
- ValueProfile* profile = emitProfiledOpcode(op_resolve_base);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(false);
- instructions().append(profile);
- return dst;
- }
+// Indicates the least upper bound of resolve type based on local scope. The bytecode linker
+// will start with this ResolveType and compute the least upper bound including intercepting scopes.
+ResolveType BytecodeGenerator::resolveType()
+{
+ if (m_localScopeDepth)
+ return Dynamic;
+ if (m_symbolTable && m_symbolTable->usesNonStrictEval())
+ return GlobalPropertyWithVarInjectionChecks;
+ return GlobalProperty;
+}
- // Global object is the base
- return emitLoad(dst, JSValue(globalObject));
-}
-
-RegisterID* BytecodeGenerator::emitResolveBaseForPut(RegisterID* dst, const Identifier& property)
-{
- if (!m_codeBlock->isStrictMode())
- return emitResolveBase(dst, property);
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject);
- if (!globalObject || requiresDynamicChecks) {
- // We can't optimise at all :-(
- ValueProfile* profile = emitProfiledOpcode(op_resolve_base);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(true);
- instructions().append(profile);
- return dst;
- }
+RegisterID* BytecodeGenerator::emitResolveScope(RegisterID* dst, const Identifier& identifier)
+{
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
- // Global object is the base
- RefPtr<RegisterID> result = emitLoad(dst, JSValue(globalObject));
- emitOpcode(op_ensure_property_exists);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- return result.get();
-}
-
-RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
-{
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- if (!findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject) || !globalObject || requiresDynamicChecks) {
- // We can't optimise at all :-(
- ValueProfile* profile = emitProfiledOpcode(op_resolve_with_base);
- instructions().append(baseDst->index());
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- instructions().append(profile);
- return baseDst;
- }
+ ASSERT(!m_symbolTable || !m_symbolTable->contains(identifier.impl()) || resolveType() == Dynamic);
- bool forceGlobalResolve = false;
+ // resolve_scope dst, id, ResolveType, depth
+ emitOpcode(op_resolve_scope);
+ instructions().append(kill(dst));
+ instructions().append(addConstant(identifier));
+ instructions().append(resolveType());
+ instructions().append(0);
+ instructions().append(0);
+ return dst;
+}
- // Global object is the base
- emitLoad(baseDst, JSValue(globalObject));
+RegisterID* BytecodeGenerator::emitGetFromScope(RegisterID* dst, RegisterID* scope, const Identifier& identifier, ResolveMode resolveMode)
+{
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
- if (index != missingSymbolMarker() && !forceGlobalResolve) {
- // Directly index the property lookup across multiple scopes.
- emitGetScopedVar(propDst, depth, index, globalObject);
- return baseDst;
- }
- if (shouldAvoidResolveGlobal()) {
- ValueProfile* profile = emitProfiledOpcode(op_resolve);
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- instructions().append(profile);
- return baseDst;
- }
-#if ENABLE(JIT)
- m_codeBlock->addGlobalResolveInfo(instructions().size());
-#endif
-#if ENABLE(CLASSIC_INTERPRETER)
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
-#endif
- ValueProfile* profile = emitProfiledOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
+ // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_scope);
+ instructions().append(kill(dst));
+ instructions().append(scope->index());
+ instructions().append(addConstant(identifier));
+ instructions().append(ResolveModeAndType(resolveMode, resolveType()).operand());
instructions().append(0);
instructions().append(0);
- if (requiresDynamicChecks)
- instructions().append(depth);
instructions().append(profile);
- return baseDst;
-}
-
-RegisterID* BytecodeGenerator::emitResolveWithThis(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
-{
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- if (!findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject) || !globalObject || requiresDynamicChecks) {
- // We can't optimise at all :-(
- ValueProfile* profile = emitProfiledOpcode(op_resolve_with_this);
- instructions().append(baseDst->index());
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- instructions().append(profile);
- return baseDst;
- }
-
- bool forceGlobalResolve = false;
+ return dst;
+}
- // Global object is the base
- emitLoad(baseDst, jsUndefined());
+RegisterID* BytecodeGenerator::emitPutToScope(RegisterID* scope, const Identifier& identifier, RegisterID* value, ResolveMode resolveMode)
+{
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
- if (index != missingSymbolMarker() && !forceGlobalResolve) {
- // Directly index the property lookup across multiple scopes.
- emitGetScopedVar(propDst, depth, index, globalObject);
- return baseDst;
- }
- if (shouldAvoidResolveGlobal()) {
- ValueProfile* profile = emitProfiledOpcode(op_resolve);
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- instructions().append(profile);
- return baseDst;
- }
-#if ENABLE(JIT)
- m_codeBlock->addGlobalResolveInfo(instructions().size());
-#endif
-#if ENABLE(CLASSIC_INTERPRETER)
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
-#endif
- ValueProfile* profile = emitProfiledOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
+ // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+ emitOpcode(op_put_to_scope);
+ instructions().append(scope->index());
+ instructions().append(addConstant(identifier));
+ instructions().append(value->index());
+ instructions().append(ResolveModeAndType(resolveMode, resolveType()).operand());
instructions().append(0);
instructions().append(0);
- if (requiresDynamicChecks)
- instructions().append(depth);
- instructions().append(profile);
- return baseDst;
+ return value;
}
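The operand-order comments above ("get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand" and the matching put_to_scope line) are the only description of the new scope-access layouts in this hunk. As a rough standalone sketch, assuming the two trailing zeroes are placeholders to be patched later (none of these names or types are JSC's real ones), appending operands in that fixed order to a flat instruction stream would look like this:

#include <cstdint>
#include <iostream>
#include <vector>

using Instr = int32_t;

// get_from_scope: dst, scope, id, ResolveModeAndType, Structure, Operand, then the profile.
void appendGetFromScope(std::vector<Instr>& stream, Instr dst, Instr scope,
                        Instr identifierIndex, Instr resolveModeAndType, Instr profile)
{
    stream.push_back(dst);
    stream.push_back(scope);
    stream.push_back(identifierIndex);
    stream.push_back(resolveModeAndType);
    stream.push_back(0); // Structure slot, left blank at emit time
    stream.push_back(0); // Operand slot, left blank at emit time
    stream.push_back(profile);
}

int main()
{
    std::vector<Instr> stream;
    appendGetFromScope(stream, /*dst*/ -1, /*scope*/ -2, /*id*/ 3, /*modeAndType*/ 0, /*profile*/ 1);
    std::cout << "operands appended: " << stream.size() << "\n"; // prints 7
}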
-void BytecodeGenerator::emitMethodCheck()
+RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype)
+{
+ emitOpcode(op_instanceof);
+ instructions().append(dst->index());
+ instructions().append(value->index());
+ instructions().append(basePrototype->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitInitGlobalConst(const Identifier& identifier, RegisterID* value)
{
- emitOpcode(op_method_check);
+ ASSERT(m_codeType == GlobalCode);
+ emitOpcode(op_init_global_const_nop);
+ instructions().append(0);
+ instructions().append(value->index());
+ instructions().append(0);
+ instructions().append(addConstant(identifier));
+ return value;
}
RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
{
m_codeBlock->addPropertyAccessInstruction(instructions().size());
- ValueProfile* profile = emitProfiledOpcode(op_get_by_id);
- instructions().append(dst->index());
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id);
+ instructions().append(kill(dst));
instructions().append(base->index());
instructions().append(addConstant(property));
instructions().append(0);
{
emitOpcode(op_get_arguments_length);
instructions().append(dst->index());
- ASSERT(base->index() == m_codeBlock->argumentsRegister());
+ ASSERT(base->virtualRegister() == m_codeBlock->argumentsRegister());
instructions().append(base->index());
instructions().append(addConstant(propertyNames().length));
return dst;
RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
+ unsigned propertyIndex = addConstant(property);
+
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
m_codeBlock->addPropertyAccessInstruction(instructions().size());
emitOpcode(op_put_by_id);
instructions().append(base->index());
- instructions().append(addConstant(property));
+ instructions().append(propertyIndex);
instructions().append(value->index());
instructions().append(0);
instructions().append(0);
RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
+ unsigned propertyIndex = addConstant(property);
+
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
m_codeBlock->addPropertyAccessInstruction(instructions().size());
emitOpcode(op_put_by_id);
instructions().append(base->index());
- instructions().append(addConstant(property));
+ instructions().append(propertyIndex);
instructions().append(value->index());
instructions().append(0);
instructions().append(0);
instructions().append(0);
instructions().append(0);
- instructions().append(property != m_globalData->propertyNames->underscoreProto);
+ instructions().append(
+ property != m_vm->propertyNames->underscoreProto
+ && PropertyName(property).asIndex() == PropertyName::NotAnIndex);
return value;
}
void BytecodeGenerator::emitPutGetterSetter(RegisterID* base, const Identifier& property, RegisterID* getter, RegisterID* setter)
{
+ unsigned propertyIndex = addConstant(property);
+
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
emitOpcode(op_put_getter_setter);
instructions().append(base->index());
- instructions().append(addConstant(property));
+ instructions().append(propertyIndex);
instructions().append(getter->index());
instructions().append(setter->index());
}
RegisterID* BytecodeGenerator::emitGetArgumentByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
{
- ValueProfile* profile = emitProfiledOpcode(op_get_argument_by_val);
- instructions().append(dst->index());
- ASSERT(base->index() == m_codeBlock->argumentsRegister());
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_argument_by_val);
+ instructions().append(kill(dst));
+ ASSERT(base->virtualRegister() == m_codeBlock->argumentsRegister());
instructions().append(base->index());
instructions().append(property->index());
+ instructions().append(arrayProfile);
instructions().append(profile);
return dst;
}
return dst;
}
}
- ValueProfile* profile = emitProfiledOpcode(op_get_by_val);
- instructions().append(dst->index());
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val);
+ instructions().append(kill(dst));
instructions().append(base->index());
instructions().append(property->index());
+ instructions().append(arrayProfile);
instructions().append(profile);
return dst;
}
RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
{
- emitOpcode(op_put_by_val);
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ if (m_isBuiltinFunction)
+ emitOpcode(op_put_by_val_direct);
+ else
+ emitOpcode(op_put_by_val);
+ instructions().append(base->index());
+ instructions().append(property->index());
+ instructions().append(value->index());
+ instructions().append(arrayProfile);
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitDirectPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
+{
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ emitOpcode(op_put_by_val_direct);
instructions().append(base->index());
instructions().append(property->index());
instructions().append(value->index());
+ instructions().append(arrayProfile);
return value;
}
return value;
}
+RegisterID* BytecodeGenerator::emitCreateThis(RegisterID* dst)
+{
+ RefPtr<RegisterID> func = newTemporary();
+
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+ emitOpcode(op_get_callee);
+ instructions().append(func->index());
+ instructions().append(0);
+
+ size_t begin = instructions().size();
+ m_staticPropertyAnalyzer.createThis(m_thisRegister.index(), begin + 3);
+
+ emitOpcode(op_create_this);
+ instructions().append(m_thisRegister.index());
+ instructions().append(func->index());
+ instructions().append(0);
+ return dst;
+}
+
RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst)
{
+ size_t begin = instructions().size();
+ m_staticPropertyAnalyzer.newObject(dst->index(), begin + 2);
+
emitOpcode(op_new_object);
instructions().append(dst->index());
+ instructions().append(0);
+ instructions().append(newObjectAllocationProfile());
return dst;
}
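The begin + 3 and begin + 2 arguments passed to the static property analyzer above appear to name the operand slot of the instruction being emitted that can be patched later: begin is the index of the opcode word, so operand k lives at begin + k, which matches the zero placeholders appended at those positions (operand 3 for op_create_this, operand 2 for op_new_object). A tiny standalone sketch of that offset arithmetic, with invented values:

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> stream;

    std::size_t begin = stream.size();   // index of the opcode word about to be emitted
    std::size_t hintSlot = begin + 2;    // operand 2 of the upcoming instruction

    stream.push_back(/*op_new_object*/ 42); // opcode word sits at 'begin'
    stream.push_back(/*dst*/ -1);           // operand 1
    stream.push_back(0);                    // operand 2: placeholder that may be patched later
    stream.push_back(/*profile*/ 7);        // operand 3

    stream[hintSlot] = 8;                   // e.g. an inferred inline capacity
    assert(stream[begin + 2] == 8);
}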
JSString* BytecodeGenerator::addStringConstant(const Identifier& identifier)
{
- JSString*& stringInMap = m_stringMap.add(identifier.impl(), 0).iterator->second;
+ JSString*& stringInMap = m_stringMap.add(identifier.impl(), nullptr).iterator->value;
if (!stringInMap) {
- stringInMap = jsString(globalData(), identifier.ustring());
+ stringInMap = jsString(vm(), identifier.string());
addConstantValue(stringInMap);
}
return stringInMap;
bool hadVariableExpression = false;
if (length) {
for (ElementNode* n = elements; n; n = n->next()) {
- if (!n->value()->isNumber() && !n->value()->isString()) {
+ if (!n->value()->isConstant()) {
hadVariableExpression = true;
break;
}
if (!hadVariableExpression) {
ASSERT(length == checkLength);
unsigned constantBufferIndex = addConstantBuffer(length);
- JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex);
+ JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex).data();
unsigned index = 0;
for (ElementNode* n = elements; index < length; n = n->next()) {
- if (n->value()->isNumber())
- constantBuffer[index++] = jsNumber(static_cast<NumberNode*>(n->value())->value());
- else {
- ASSERT(n->value()->isString());
- constantBuffer[index++] = addStringConstant(static_cast<StringNode*>(n->value())->value());
- }
+ ASSERT(n->value()->isConstant());
+ constantBuffer[index++] = static_cast<ConstantNode*>(n->value())->jsValue(*this);
}
emitOpcode(op_new_array_buffer);
instructions().append(dst->index());
instructions().append(constantBufferIndex);
instructions().append(length);
+ instructions().append(newArrayAllocationProfile());
return dst;
}
}
- Vector<RefPtr<RegisterID>, 16> argv;
+ Vector<RefPtr<RegisterID>, 16, UnsafeVectorOverflow> argv;
for (ElementNode* n = elements; n; n = n->next()) {
- if (n->elision())
+ if (!length)
break;
+ length--;
+ ASSERT(!n->value()->isSpreadExpression());
argv.append(newTemporary());
// op_new_array requires the initial values to be a sequential range of registers
- ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
+ ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() - 1);
emitNode(argv.last().get(), n->value());
}
+ ASSERT(!length);
emitOpcode(op_new_array);
instructions().append(dst->index());
instructions().append(argv.size() ? argv[0]->index() : 0); // argv
instructions().append(argv.size()); // argc
+ instructions().append(newArrayAllocationProfile());
return dst;
}
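The flipped sign in the ASSERT above reflects that each newly allocated temporary now has an index one less than the previous one, while op_new_array still requires the initial values to occupy one contiguous run of registers. A minimal standalone check of that invariant, assuming indices decrease by exactly one per register:

#include <cassert>
#include <vector>

// Verifies a strictly descending, contiguous index run, mirroring the ASSERT in emitNewArray.
static bool isContiguousDescending(const std::vector<int>& indices)
{
    for (std::size_t i = 1; i < indices.size(); ++i) {
        if (indices[i] != indices[i - 1] - 1)
            return false;
    }
    return true;
}

int main()
{
    assert(isContiguousDescending({ -3, -4, -5 }));  // ok: each index is previous - 1
    assert(!isContiguousDescending({ -3, -5, -6 })); // gap: would trip the ASSERT
}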
-RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FunctionBodyNode* function)
+RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, CaptureMode captureMode, FunctionBodyNode* function)
{
- return emitNewFunctionInternal(dst, m_codeBlock->addFunctionDecl(makeFunction(m_globalData, function)), false);
+ return emitNewFunctionInternal(dst, captureMode, m_codeBlock->addFunctionDecl(makeFunction(function)), false);
}
RegisterID* BytecodeGenerator::emitLazyNewFunction(RegisterID* dst, FunctionBodyNode* function)
{
FunctionOffsetMap::AddResult ptr = m_functionOffsets.add(function, 0);
if (ptr.isNewEntry)
- ptr.iterator->second = m_codeBlock->addFunctionDecl(makeFunction(m_globalData, function));
- return emitNewFunctionInternal(dst, ptr.iterator->second, true);
+ ptr.iterator->value = m_codeBlock->addFunctionDecl(makeFunction(function));
+ return emitNewFunctionInternal(dst, NotCaptured, ptr.iterator->value, true);
}
-RegisterID* BytecodeGenerator::emitNewFunctionInternal(RegisterID* dst, unsigned index, bool doNullCheck)
+RegisterID* BytecodeGenerator::emitNewFunctionInternal(RegisterID* dst, CaptureMode captureMode, unsigned index, bool doNullCheck)
{
createActivationIfNecessary();
- emitOpcode(op_new_func);
+ emitOpcode(captureMode == IsCaptured ? op_new_captured_func : op_new_func);
instructions().append(dst->index());
instructions().append(index);
- instructions().append(doNullCheck);
+ if (captureMode == IsCaptured) {
+ ASSERT(!doNullCheck);
+ instructions().append(watchableVariable(dst->index()));
+ } else
+ instructions().append(doNullCheck);
return dst;
}
RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* r0, FuncExprNode* n)
{
FunctionBodyNode* function = n->body();
- unsigned index = m_codeBlock->addFunctionExpr(makeFunction(m_globalData, function));
+ unsigned index = m_codeBlock->addFunctionExpr(makeFunction(function));
createActivationIfNecessary();
emitOpcode(op_new_func_exp);
return r0;
}
-RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
- return emitCall(op_call, dst, func, callArguments, divot, startOffset, endOffset);
+ return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd);
}
void BytecodeGenerator::createArgumentsIfNecessary()
if (!m_codeBlock->usesArguments())
return;
- // If we're in strict mode we tear off the arguments on function
- // entry, so there's no need to check if we need to create them
- // now
- if (m_codeBlock->isStrictMode())
+ if (shouldTearOffArgumentsEagerly())
return;
emitOpcode(op_create_arguments);
- instructions().append(m_codeBlock->argumentsRegister());
+ instructions().append(m_codeBlock->argumentsRegister().offset());
+ ASSERT(!hasWatchableVariable(m_codeBlock->argumentsRegister().offset()));
}
void BytecodeGenerator::createActivationIfNecessary()
{
- if (m_hasCreatedActivation)
- return;
- if (!m_codeBlock->needsFullScopeChain())
+ if (!m_activationRegister)
return;
emitOpcode(op_create_activation);
instructions().append(m_activationRegister->index());
}
-RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
- return emitCall(op_call_eval, dst, func, callArguments, divot, startOffset, endOffset);
+ createActivationIfNecessary();
+ return emitCall(op_call_eval, dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd);
+}
+
+ExpectedFunction BytecodeGenerator::expectedFunctionForIdentifier(const Identifier& identifier)
+{
+ if (identifier == m_vm->propertyNames->Object)
+ return ExpectObjectConstructor;
+ if (identifier == m_vm->propertyNames->Array)
+ return ExpectArrayConstructor;
+ return NoExpectedFunction;
}
-RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
+ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, Label* done)
+{
+ RefPtr<Label> realCall = newLabel();
+ switch (expectedFunction) {
+ case ExpectObjectConstructor: {
+ // If any arguments beyond the implicit 'this' were passed, we can't do anything interesting.
+ if (callArguments.argumentCountIncludingThis() >= 2)
+ return NoExpectedFunction;
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jneq_ptr);
+ instructions().append(func->index());
+ instructions().append(Special::ObjectConstructor);
+ instructions().append(realCall->bind(begin, instructions().size()));
+
+ if (dst != ignoredResult())
+ emitNewObject(dst);
+ break;
+ }
+
+ case ExpectArrayConstructor: {
+ // If you're doing anything other than "new Array()" or "new Array(foo)" then we
+ // don't inline it, for now. The only reason is that call arguments are in
+ // the opposite order of what op_new_array expects, so we'd either need to change
+ // how op_new_array works or we'd need an op_new_array_reverse. Neither of these
+ // things sounds like it's worth it.
+ if (callArguments.argumentCountIncludingThis() > 2)
+ return NoExpectedFunction;
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jneq_ptr);
+ instructions().append(func->index());
+ instructions().append(Special::ArrayConstructor);
+ instructions().append(realCall->bind(begin, instructions().size()));
+
+ if (dst != ignoredResult()) {
+ if (callArguments.argumentCountIncludingThis() == 2) {
+ emitOpcode(op_new_array_with_size);
+ instructions().append(dst->index());
+ instructions().append(callArguments.argumentRegister(0)->index());
+ instructions().append(newArrayAllocationProfile());
+ } else {
+ ASSERT(callArguments.argumentCountIncludingThis() == 1);
+ emitOpcode(op_new_array);
+ instructions().append(dst->index());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(newArrayAllocationProfile());
+ }
+ }
+ break;
+ }
+
+ default:
+ ASSERT(expectedFunction == NoExpectedFunction);
+ return NoExpectedFunction;
+ }
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jmp);
+ instructions().append(done->bind(begin, instructions().size()));
+ emitLabel(realCall.get());
+
+ return expectedFunction;
+}
+
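emitExpectedFunctionSnippet above compiles to a small diamond: op_jneq_ptr skips the inline allocation when the callee is not the expected well-known constructor, and op_jmp skips the generic call when it is. A rough host-language sketch of that control-flow shape (the function and parameter names here are invented for illustration, not JSC API):

#include <functional>
#include <iostream>

// Illustration of the emitted control flow only.
static void emitExpectedConstructorDiamond(const void* callee, const void* expectedConstructor,
                                           const std::function<void()>& inlineAllocation, // e.g. op_new_object
                                           const std::function<void()>& genericCall)      // op_call / op_construct
{
    if (callee != expectedConstructor) { // op_jneq_ptr -> realCall
        genericCall();                   // realCall: fall back to the ordinary call
        return;                          // done:
    }
    inlineAllocation();                  // fast path: allocate directly
    // op_jmp -> done (the ordinary call is skipped)
}

int main()
{
    int object = 0, array = 0;
    emitExpectedConstructorDiamond(&object, &object,
                                   [] { std::cout << "inline allocation\n"; },
                                   [] { std::cout << "generic call\n"; }); // takes the fast path
    emitExpectedConstructorDiamond(&object, &array,
                                   [] { std::cout << "inline allocation\n"; },
                                   [] { std::cout << "generic call\n"; }); // takes the generic call
}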
+RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
ASSERT(func->refCount());
// Generate code for arguments.
unsigned argument = 0;
- for (ArgumentListNode* n = callArguments.argumentsNode()->m_listNode; n; n = n->m_next)
- emitNode(callArguments.argumentRegister(argument++), n);
-
+ if (callArguments.argumentsNode()) {
+ ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
+ if (n && n->m_expr->isSpreadExpression()) {
+ RELEASE_ASSERT(!n->m_next);
+ auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
+ RefPtr<RegisterID> argumentRegister;
+ if (expression->isResolveNode() && willResolveToArguments(static_cast<ResolveNode*>(expression)->identifier()) && !symbolTable().slowArguments())
+ argumentRegister = uncheckedRegisterForArguments();
+ else
+ argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
+ RefPtr<RegisterID> thisRegister = emitMove(newTemporary(), callArguments.thisRegister());
+ return emitCallVarargs(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+ }
+ for (; n; n = n->m_next)
+ emitNode(callArguments.argumentRegister(argument++), n);
+ }
+
// Reserve space for call frame.
- Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
- for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+ Vector<RefPtr<RegisterID>, JSStack::CallFrameHeaderSize, UnsafeVectorOverflow> callFrame;
+ for (int i = 0; i < JSStack::CallFrameHeaderSize; ++i)
callFrame.append(newTemporary());
if (m_shouldEmitProfileHooks) {
instructions().append(callArguments.profileHookRegister()->index());
}
- emitExpressionInfo(divot, startOffset, endOffset);
+ emitExpressionInfo(divot, divotStart, divotEnd);
+ RefPtr<Label> done = newLabel();
+ expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get());
+
// Emit call.
- emitOpcode(opcodeID);
- instructions().append(func->index()); // func
- instructions().append(callArguments.argumentCountIncludingThis()); // argCount
- instructions().append(callArguments.registerOffset()); // registerOffset
-#if ENABLE(LLINT)
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID);
+ ASSERT(dst);
+ ASSERT(dst != ignoredResult());
+ instructions().append(dst->index());
+ instructions().append(func->index());
+ instructions().append(callArguments.argumentCountIncludingThis());
+ instructions().append(callArguments.stackOffset());
instructions().append(m_codeBlock->addLLIntCallLinkInfo());
-#else
- instructions().append(0);
-#endif
instructions().append(0);
- if (dst != ignoredResult()) {
- ValueProfile* profile = emitProfiledOpcode(op_call_put_result);
- instructions().append(dst->index()); // dst
- instructions().append(profile);
- }
+ instructions().append(arrayProfile);
+ instructions().append(profile);
+
+ if (expectedFunction != NoExpectedFunction)
+ emitLabel(done.get());
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
return dst;
}
-RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, RegisterID* profileHookRegister, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+ return emitCallVarargs(op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
+}
+
+RegisterID* BytecodeGenerator::emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+ return emitCallVarargs(op_construct_varargs, dst, func, 0, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
+}
+
+RegisterID* BytecodeGenerator::emitCallVarargs(OpcodeID opcode, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
if (m_shouldEmitProfileHooks) {
emitMove(profileHookRegister, func);
instructions().append(profileHookRegister->index());
}
- emitExpressionInfo(divot, startOffset, endOffset);
+ emitExpressionInfo(divot, divotStart, divotEnd);
// Emit call.
- emitOpcode(op_call_varargs);
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(opcode);
+ ASSERT(dst != ignoredResult());
+ instructions().append(dst->index());
instructions().append(func->index());
- instructions().append(thisRegister->index());
+ instructions().append(thisRegister ? thisRegister->index() : 0);
instructions().append(arguments->index());
instructions().append(firstFreeRegister->index());
- if (dst != ignoredResult()) {
- ValueProfile* profile = emitProfiledOpcode(op_call_put_result);
- instructions().append(dst->index());
- instructions().append(profile);
- }
+ instructions().append(firstVarArgOffset);
+ instructions().append(arrayProfile);
+ instructions().append(profile);
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
instructions().append(profileHookRegister->index());
RegisterID* BytecodeGenerator::emitReturn(RegisterID* src)
{
- if (m_codeBlock->needsFullScopeChain()) {
+ if (m_activationRegister) {
emitOpcode(op_tear_off_activation);
instructions().append(m_activationRegister->index());
- instructions().append(m_codeBlock->argumentsRegister());
- } else if (m_codeBlock->usesArguments() && m_codeBlock->numParameters() != 1 && !m_codeBlock->isStrictMode()) {
+ }
+
+ if (m_codeBlock->usesArguments() && m_codeBlock->numParameters() != 1 && !isStrictMode()) {
emitOpcode(op_tear_off_arguments);
- instructions().append(m_codeBlock->argumentsRegister());
+ instructions().append(m_codeBlock->argumentsRegister().offset());
+ instructions().append(m_activationRegister ? m_activationRegister->index() : emitLoad(0, JSValue())->index());
}
// Constructors use op_ret_object_or_this to check the result is an
return src;
}
-RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
ASSERT(func->refCount());
// Generate code for arguments.
unsigned argument = 0;
if (ArgumentsNode* argumentsNode = callArguments.argumentsNode()) {
+
+ ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
+ if (n && n->m_expr->isSpreadExpression()) {
+ RELEASE_ASSERT(!n->m_next);
+ auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
+ RefPtr<RegisterID> argumentRegister;
+ if (expression->isResolveNode() && willResolveToArguments(static_cast<ResolveNode*>(expression)->identifier()) && !symbolTable().slowArguments())
+ argumentRegister = uncheckedRegisterForArguments();
+ else
+ argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
+ return emitConstructVarargs(dst, func, argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+ }
+
for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next)
emitNode(callArguments.argumentRegister(argument++), n);
}
}
// Reserve space for call frame.
- Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
- for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+ Vector<RefPtr<RegisterID>, JSStack::CallFrameHeaderSize, UnsafeVectorOverflow> callFrame;
+ for (int i = 0; i < JSStack::CallFrameHeaderSize; ++i)
callFrame.append(newTemporary());
- emitExpressionInfo(divot, startOffset, endOffset);
+ emitExpressionInfo(divot, divotStart, divotEnd);
+
+ RefPtr<Label> done = newLabel();
+ expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get());
- emitOpcode(op_construct);
- instructions().append(func->index()); // func
- instructions().append(callArguments.argumentCountIncludingThis()); // argCount
- instructions().append(callArguments.registerOffset()); // registerOffset
-#if ENABLE(LLINT)
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_construct);
+ ASSERT(dst != ignoredResult());
+ instructions().append(dst->index());
+ instructions().append(func->index());
+ instructions().append(callArguments.argumentCountIncludingThis());
+ instructions().append(callArguments.stackOffset());
instructions().append(m_codeBlock->addLLIntCallLinkInfo());
-#else
instructions().append(0);
-#endif
instructions().append(0);
- if (dst != ignoredResult()) {
- ValueProfile* profile = emitProfiledOpcode(op_call_put_result);
- instructions().append(dst->index()); // dst
- instructions().append(profile);
- }
+ instructions().append(profile);
+
+ if (expectedFunction != NoExpectedFunction)
+ emitLabel(done.get());
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
instructions().append(src->index());
}
-RegisterID* BytecodeGenerator::emitPushScope(RegisterID* scope)
+RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* scope)
{
- ASSERT(scope->isTemporary());
ControlFlowContext context;
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
- m_dynamicScopeDepth++;
+ m_localScopeDepth++;
- return emitUnaryNoDstOp(op_push_scope, scope);
+ createActivationIfNecessary();
+ return emitUnaryNoDstOp(op_push_with_scope, scope);
}
void BytecodeGenerator::emitPopScope()
emitOpcode(op_pop_scope);
m_scopeContextStack.removeLast();
- m_dynamicScopeDepth--;
+ m_localScopeDepth--;
}
-void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, int firstLine, int lastLine)
+void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, unsigned line, unsigned charOffset, unsigned lineStart)
{
#if ENABLE(DEBUG_WITH_BREAKPOINT)
if (debugHookID != DidReachBreakpoint)
if (!m_shouldEmitDebugHooks)
return;
#endif
+ JSTextPosition divot(line, charOffset, lineStart);
+ emitExpressionInfo(divot, divot, divot);
emitOpcode(op_debug);
instructions().append(debugHookID);
- instructions().append(firstLine);
- instructions().append(lastLine);
+ instructions().append(false);
}
void BytecodeGenerator::pushFinallyContext(StatementNode* finallyBlock)
{
+ // Reclaim free label scopes.
+ while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+ m_labelScopes.removeLast();
+
ControlFlowContext scope;
scope.isFinallyBlock = true;
FinallyContext context = {
finallyBlock,
- m_scopeContextStack.size(),
- m_switchContextStack.size(),
- m_forInContextStack.size(),
- m_labelScopes.size(),
+ static_cast<unsigned>(m_scopeContextStack.size()),
+ static_cast<unsigned>(m_switchContextStack.size()),
+ static_cast<unsigned>(m_forInContextStack.size()),
+ static_cast<unsigned>(m_tryContextStack.size()),
+ static_cast<unsigned>(m_labelScopes.size()),
m_finallyDepth,
- m_dynamicScopeDepth
+ m_localScopeDepth
};
scope.finallyContext = context;
m_scopeContextStack.append(scope);
m_finallyDepth--;
}
-LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
+LabelScopePtr BytecodeGenerator::breakTarget(const Identifier& name)
{
// Reclaim free label scopes.
//
}
if (!m_labelScopes.size())
- return 0;
+ return LabelScopePtr::null();
// We special-case the following, which is a syntax error in Firefox:
// label:
LabelScope* scope = &m_labelScopes[i];
if (scope->type() != LabelScope::NamedLabel) {
ASSERT(scope->breakTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->name() && *scope->name() == name) {
ASSERT(scope->breakTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
-LabelScope* BytecodeGenerator::continueTarget(const Identifier& name)
+LabelScopePtr BytecodeGenerator::continueTarget(const Identifier& name)
{
// Reclaim free label scopes.
while (m_labelScopes.size() && !m_labelScopes.last().refCount())
m_labelScopes.removeLast();
if (!m_labelScopes.size())
- return 0;
+ return LabelScopePtr::null();
if (name.isEmpty()) {
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() == LabelScope::Loop) {
ASSERT(scope->continueTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
// Continue to the loop nested nearest to the label scope that matches
// 'name'.
- LabelScope* result = 0;
+ LabelScopePtr result = LabelScopePtr::null();
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() == LabelScope::Loop) {
ASSERT(scope->continueTarget());
- result = scope;
+ result = LabelScopePtr(m_labelScopes, i);
}
if (scope->name() && *scope->name() == name)
- return result; // may be 0
+ return result; // may be null.
}
- return 0;
+ return LabelScopePtr::null();
}
-PassRefPtr<Label> BytecodeGenerator::emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope)
+void BytecodeGenerator::emitComplexPopScopes(ControlFlowContext* topScope, ControlFlowContext* bottomScope)
{
while (topScope > bottomScope) {
// First we count the number of dynamic scopes we need to remove to get
}
if (nNormalScopes) {
- size_t begin = instructions().size();
-
// We need to remove a number of dynamic scopes to get to the next
// finally block
- emitOpcode(op_jmp_scopes);
- instructions().append(nNormalScopes);
-
- // If topScope == bottomScope then there isn't actually a finally block
- // left to emit, so make the jmp_scopes jump directly to the target label
- if (topScope == bottomScope) {
- instructions().append(target->bind(begin, instructions().size()));
- return target;
- }
+ while (nNormalScopes--)
+ emitOpcode(op_pop_scope);
- // Otherwise we just use jmp_scopes to pop a group of scopes and go
- // to the next instruction
- RefPtr<Label> nextInsn = newLabel();
- instructions().append(nextInsn->bind(begin, instructions().size()));
- emitLabel(nextInsn.get());
+ // If topScope == bottomScope then there isn't a finally block left to emit.
+ if (topScope == bottomScope)
+ return;
}
Vector<ControlFlowContext> savedScopeContextStack;
Vector<SwitchInfo> savedSwitchContextStack;
Vector<ForInContext> savedForInContextStack;
- SegmentedVector<LabelScope, 8> savedLabelScopes;
+ Vector<TryContext> poppedTryContexts;
+ LabelScopeStore savedLabelScopes;
while (topScope > bottomScope && topScope->isFinallyBlock) {
+ RefPtr<Label> beforeFinally = emitLabel(newLabel().get());
+
// Save the current state of the world, then instate the state of the world
// that the finally block expects to run in.
FinallyContext finallyContext = topScope->finallyContext;
bool flipScopes = finallyContext.scopeContextStackSize != m_scopeContextStack.size();
bool flipSwitches = finallyContext.switchContextStackSize != m_switchContextStack.size();
bool flipForIns = finallyContext.forInContextStackSize != m_forInContextStack.size();
+ bool flipTries = finallyContext.tryContextStackSize != m_tryContextStack.size();
bool flipLabelScopes = finallyContext.labelScopesSize != m_labelScopes.size();
int topScopeIndex = -1;
int bottomScopeIndex = -1;
savedForInContextStack = m_forInContextStack;
m_forInContextStack.shrink(finallyContext.forInContextStackSize);
}
+ if (flipTries) {
+ while (m_tryContextStack.size() != finallyContext.tryContextStackSize) {
+ ASSERT(m_tryContextStack.size() > finallyContext.tryContextStackSize);
+ TryContext context = m_tryContextStack.last();
+ m_tryContextStack.removeLast();
+ TryRange range;
+ range.start = context.start;
+ range.end = beforeFinally;
+ range.tryData = context.tryData;
+ m_tryRanges.append(range);
+ poppedTryContexts.append(context);
+ }
+ }
if (flipLabelScopes) {
savedLabelScopes = m_labelScopes;
while (m_labelScopes.size() > finallyContext.labelScopesSize)
}
int savedFinallyDepth = m_finallyDepth;
m_finallyDepth = finallyContext.finallyDepth;
- int savedDynamicScopeDepth = m_dynamicScopeDepth;
- m_dynamicScopeDepth = finallyContext.dynamicScopeDepth;
+ int savedDynamicScopeDepth = m_localScopeDepth;
+ m_localScopeDepth = finallyContext.dynamicScopeDepth;
// Emit the finally block.
emitNode(finallyContext.finallyBlock);
+ RefPtr<Label> afterFinally = emitLabel(newLabel().get());
+
// Restore the state of the world.
if (flipScopes) {
m_scopeContextStack = savedScopeContextStack;
m_switchContextStack = savedSwitchContextStack;
if (flipForIns)
m_forInContextStack = savedForInContextStack;
+ if (flipTries) {
+ ASSERT(m_tryContextStack.size() == finallyContext.tryContextStackSize);
+ for (unsigned i = poppedTryContexts.size(); i--;) {
+ TryContext context = poppedTryContexts[i];
+ context.start = afterFinally;
+ m_tryContextStack.append(context);
+ }
+ poppedTryContexts.clear();
+ }
if (flipLabelScopes)
m_labelScopes = savedLabelScopes;
m_finallyDepth = savedFinallyDepth;
- m_dynamicScopeDepth = savedDynamicScopeDepth;
+ m_localScopeDepth = savedDynamicScopeDepth;
--topScope;
}
}
- return emitJump(target);
}
-PassRefPtr<Label> BytecodeGenerator::emitJumpScopes(Label* target, int targetScopeDepth)
+void BytecodeGenerator::emitPopScopes(int targetScopeDepth)
{
ASSERT(scopeDepth() - targetScopeDepth >= 0);
- ASSERT(target->isForward());
size_t scopeDelta = scopeDepth() - targetScopeDepth;
ASSERT(scopeDelta <= m_scopeContextStack.size());
if (!scopeDelta)
- return emitJump(target);
-
- if (m_finallyDepth)
- return emitComplexJumpScopes(target, &m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
+ return;
- size_t begin = instructions().size();
+ if (!m_finallyDepth) {
+ while (scopeDelta--)
+ emitOpcode(op_pop_scope);
+ return;
+ }
- emitOpcode(op_jmp_scopes);
- instructions().append(scopeDelta);
- instructions().append(target->bind(begin, instructions().size()));
- return target;
+ emitComplexPopScopes(&m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
}
RegisterID* BytecodeGenerator::emitGetPropertyNames(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, Label* breakTarget)
return dst;
}
-RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* start, Label* end)
+TryData* BytecodeGenerator::pushTry(Label* start)
+{
+ TryData tryData;
+ tryData.target = newLabel();
+ tryData.targetScopeDepth = UINT_MAX;
+ m_tryData.append(tryData);
+ TryData* result = &m_tryData.last();
+
+ TryContext tryContext;
+ tryContext.start = start;
+ tryContext.tryData = result;
+
+ m_tryContextStack.append(tryContext);
+
+ return result;
+}
+
+RegisterID* BytecodeGenerator::popTryAndEmitCatch(TryData* tryData, RegisterID* targetRegister, Label* end)
{
m_usesExceptions = true;
-#if ENABLE(JIT)
-#if ENABLE(LLINT)
- HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(bitwise_cast<void*>(&llint_op_catch))) };
-#else
- HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel() };
-#endif
-#else
- HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth };
-#endif
+
+ ASSERT_UNUSED(tryData, m_tryContextStack.last().tryData == tryData);
+
+ TryRange tryRange;
+ tryRange.start = m_tryContextStack.last().start;
+ tryRange.end = end;
+ tryRange.tryData = m_tryContextStack.last().tryData;
+ m_tryRanges.append(tryRange);
+ m_tryContextStack.removeLast();
+
+ emitLabel(tryRange.tryData->target.get());
+ tryRange.tryData->targetScopeDepth = m_localScopeDepth;
- m_codeBlock->addExceptionHandler(info);
emitOpcode(op_catch);
instructions().append(targetRegister->index());
return targetRegister;
}
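pushTry/popTryAndEmitCatch replace the old eager addExceptionHandler call with deferred bookkeeping: each open try is a (start label, handler data) pair on a stack, and popping it records a closed start/end range whose handler target is bound at the catch site. A tiny standalone model of that pairing discipline, with invented names and plain integer offsets standing in for labels:

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Pairs each open try's start offset with the offset of its catch,
// analogous to the TryContext/TryRange bookkeeping above.
struct TryTracker {
    std::vector<std::size_t> openStarts;                      // stands in for m_tryContextStack
    std::vector<std::pair<std::size_t, std::size_t>> ranges;  // stands in for m_tryRanges

    void pushTry(std::size_t startOffset) { openStarts.push_back(startOffset); }

    void popTryAndEmitCatch(std::size_t endOffset)
    {
        assert(!openStarts.empty());
        ranges.push_back({ openStarts.back(), endOffset });
        openStarts.pop_back();
    }
};

int main()
{
    TryTracker tracker;
    tracker.pushTry(10);            // outer try begins
    tracker.pushTry(20);            // nested try begins
    tracker.popTryAndEmitCatch(30); // nested catch: closes range [20, 30]
    tracker.popTryAndEmitCatch(40); // outer catch: closes range [10, 40]
    assert(tracker.ranges[0].first == 20 && tracker.ranges[0].second == 30);
    assert(tracker.ranges[1].first == 10 && tracker.ranges[1].second == 40);
}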
-void BytecodeGenerator::emitThrowReferenceError(const UString& message)
+void BytecodeGenerator::emitThrowReferenceError(const String& message)
{
- emitOpcode(op_throw_reference_error);
- instructions().append(addConstantValue(jsString(globalData(), message))->index());
+ emitOpcode(op_throw_static_error);
+ instructions().append(addConstantValue(addStringConstant(Identifier(m_vm, message)))->index());
+ instructions().append(true);
}
-void BytecodeGenerator::emitPushNewScope(RegisterID* dst, const Identifier& property, RegisterID* value)
+void BytecodeGenerator::emitPushFunctionNameScope(const Identifier& property, RegisterID* value, unsigned attributes)
{
+ emitOpcode(op_push_name_scope);
+ instructions().append(addConstant(property));
+ instructions().append(value->index());
+ instructions().append(attributes);
+}
+
+void BytecodeGenerator::emitPushCatchScope(const Identifier& property, RegisterID* value, unsigned attributes)
+{
+ createActivationIfNecessary();
+
ControlFlowContext context;
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
- m_dynamicScopeDepth++;
+ m_localScopeDepth++;
- emitOpcode(op_push_new_scope);
- instructions().append(dst->index());
+ emitOpcode(op_push_name_scope);
instructions().append(addConstant(property));
instructions().append(value->index());
+ instructions().append(attributes);
}
void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::SwitchType type)
{
- SwitchInfo info = { instructions().size(), type };
+ SwitchInfo info = { static_cast<uint32_t>(instructions().size()), type };
switch (type) {
case SwitchInfo::SwitchImmediate:
emitOpcode(op_switch_imm);
emitOpcode(op_switch_string);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
instructions().append(0); // place holder for table index
return key - min;
}
-static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
-{
- jumpTable.min = min;
- jumpTable.branchOffsets.resize(max - min + 1);
- jumpTable.branchOffsets.fill(0);
- for (uint32_t i = 0; i < clauseCount; ++i) {
- // We're emitting this after the clause labels should have been fixed, so
- // the labels should not be "forward" references
- ASSERT(!labels[i]->isForward());
- jumpTable.add(keyForImmediateSwitch(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
- }
-}
-
static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t max)
{
UNUSED_PARAM(max);
return key - min;
}
-static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
+static void prepareJumpTableForSwitch(
+ UnlinkedSimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount,
+ RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max,
+ int32_t (*keyGetter)(ExpressionNode*, int32_t min, int32_t max))
{
jumpTable.min = min;
jumpTable.branchOffsets.resize(max - min + 1);
// We're emitting this after the clause labels should have been fixed, so
// the labels should not be "forward" references
ASSERT(!labels[i]->isForward());
- jumpTable.add(keyForCharacterSwitch(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
+ jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
}
}
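prepareJumpTableForSwitch builds a dense table of size max - min + 1 in which each clause's key stores a branch offset at slot key - min and untouched slots stay zero (fall through to the default label). A compact standalone version of that densification, with illustrative names rather than JSC's:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Dense jump table indexed by (key - min); slots left at 0 mean "use the default".
struct DenseJumpTable {
    int32_t min = 0;
    std::vector<int32_t> branchOffsets;
};

static DenseJumpTable buildJumpTable(const std::vector<std::pair<int32_t, int32_t>>& clauses, // (key, offset)
                                     int32_t min, int32_t max)
{
    DenseJumpTable table;
    table.min = min;
    table.branchOffsets.assign(static_cast<std::size_t>(max - min + 1), 0);
    for (const auto& clause : clauses)
        table.branchOffsets[static_cast<std::size_t>(clause.first - min)] = clause.second;
    return table;
}

int main()
{
    DenseJumpTable table = buildJumpTable({ { 3, 100 }, { 7, 200 } }, 3, 7);
    assert(table.branchOffsets.size() == 5);
    assert(table.branchOffsets[0] == 100); // key 3
    assert(table.branchOffsets[4] == 200); // key 7
    assert(table.branchOffsets[2] == 0);   // key 5 falls through to the default
}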
-static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
+static void prepareJumpTableForStringSwitch(UnlinkedStringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
{
for (uint32_t i = 0; i < clauseCount; ++i) {
// We're emitting this after the clause labels should have been fixed, so
ASSERT(nodes[i]->isString());
StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl();
- OffsetLocation location;
- location.branchOffset = labels[i]->bind(switchAddress, switchAddress + 3);
- jumpTable.offsetTable.add(clause, location);
+ jumpTable.offsetTable.add(clause, labels[i]->bind(switchAddress, switchAddress + 3));
}
}
{
SwitchInfo switchInfo = m_switchContextStack.last();
m_switchContextStack.removeLast();
- if (switchInfo.switchType == SwitchInfo::SwitchImmediate) {
- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfImmediateSwitchJumpTables();
+
+ switch (switchInfo.switchType) {
+ case SwitchInfo::SwitchImmediate:
+ case SwitchInfo::SwitchCharacter: {
+ instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfSwitchJumpTables();
instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
- SimpleJumpTable& jumpTable = m_codeBlock->addImmediateSwitchJumpTable();
- prepareJumpTableForImmediateSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max);
- } else if (switchInfo.switchType == SwitchInfo::SwitchCharacter) {
- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfCharacterSwitchJumpTables();
- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
+ UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->addSwitchJumpTable();
+ prepareJumpTableForSwitch(
+ jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max,
+ switchInfo.switchType == SwitchInfo::SwitchImmediate
+ ? keyForImmediateSwitch
+ : keyForCharacterSwitch);
+ break;
+ }
- SimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable();
- prepareJumpTableForCharacterSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max);
- } else {
- ASSERT(switchInfo.switchType == SwitchInfo::SwitchString);
+ case SwitchInfo::SwitchString: {
instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables();
instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
- StringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
+ UnlinkedStringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
}
bool BytecodeGenerator::isArgumentNumber(const Identifier& ident, int argumentNumber)
{
- RegisterID* registerID = registerFor(ident);
+ RegisterID* registerID = local(ident).get();
if (!registerID || registerID->index() >= 0)
return 0;
return registerID->index() == CallFrame::argumentOffset(argumentNumber);
}
+void BytecodeGenerator::emitReadOnlyExceptionIfNeeded()
+{
+ if (!isStrictMode())
+ return;
+ emitOpcode(op_throw_static_error);
+ instructions().append(addConstantValue(addStringConstant(Identifier(m_vm, StrictModeReadonlyPropertyWriteError)))->index());
+ instructions().append(false);
+}
+
+void BytecodeGenerator::emitEnumeration(ThrowableExpressionData* node, ExpressionNode* subjectNode, const std::function<void(BytecodeGenerator&, RegisterID*)>& callBack)
+{
+ if (subjectNode->isResolveNode()
+ && willResolveToArguments(static_cast<ResolveNode*>(subjectNode)->identifier())
+ && !symbolTable().slowArguments()) {
+ RefPtr<RegisterID> index = emitLoad(newTemporary(), jsNumber(0));
+
+ LabelScopePtr scope = newLabelScope(LabelScope::Loop);
+ RefPtr<RegisterID> value = emitLoad(newTemporary(), jsUndefined());
+
+ RefPtr<Label> loopCondition = newLabel();
+ RefPtr<Label> loopStart = newLabel();
+ emitJump(loopCondition.get());
+ emitLabel(loopStart.get());
+ emitLoopHint();
+ emitGetArgumentByVal(value.get(), uncheckedRegisterForArguments(), index.get());
+ callBack(*this, value.get());
+
+ emitLabel(scope->continueTarget());
+ emitInc(index.get());
+ emitLabel(loopCondition.get());
+ RefPtr<RegisterID> length = emitGetArgumentsLength(newTemporary(), uncheckedRegisterForArguments());
+ emitJumpIfTrue(emitEqualityOp(op_less, newTemporary(), index.get(), length.get()), loopStart.get());
+ emitLabel(scope->breakTarget());
+ return;
+ }
+
+ LabelScopePtr scope = newLabelScope(LabelScope::Loop);
+ RefPtr<RegisterID> subject = newTemporary();
+ emitNode(subject.get(), subjectNode);
+ RefPtr<RegisterID> iterator = emitGetById(newTemporary(), subject.get(), propertyNames().iteratorPrivateName);
+ {
+ CallArguments args(*this, 0);
+ emitMove(args.thisRegister(), subject.get());
+ emitCall(iterator.get(), iterator.get(), NoExpectedFunction, args, node->divot(), node->divotStart(), node->divotEnd());
+ }
+ RefPtr<RegisterID> iteratorNext = emitGetById(newTemporary(), iterator.get(), propertyNames().iteratorNextPrivateName);
+ RefPtr<RegisterID> value = newTemporary();
+ emitLoad(value.get(), jsUndefined());
+
+ emitJump(scope->continueTarget());
+
+ RefPtr<Label> loopStart = newLabel();
+ emitLabel(loopStart.get());
+ emitLoopHint();
+ callBack(*this, value.get());
+ emitLabel(scope->continueTarget());
+ CallArguments nextArguments(*this, 0, 1);
+ emitMove(nextArguments.thisRegister(), iterator.get());
+ emitMove(nextArguments.argumentRegister(0), value.get());
+ emitCall(value.get(), iteratorNext.get(), NoExpectedFunction, nextArguments, node->divot(), node->divotStart(), node->divotEnd());
+ RefPtr<RegisterID> result = newTemporary();
+ emitJumpIfFalse(emitEqualityOp(op_stricteq, result.get(), value.get(), emitLoad(0, JSValue(vm()->iterationTerminator.get()))), loopStart.get());
+ emitLabel(scope->breakTarget());
+}
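The generic path of emitEnumeration above drives an early iteration protocol: fetch the subject's private iterator method, call it with the subject as |this|, fetch the iterator's private next method, then repeatedly call next with the previous value until the result is strict-equal to the VM's iteration terminator sentinel. A rough host-language sketch of just that loop shape (next is a plain callable and plain ints stand in for JS values; nothing here is JSC API):

#include <functional>
#include <iostream>

// 'next' stands in for the iterator's private next method, 'terminator' for
// vm->iterationTerminator, and 'callBack' for the loop body.
static void enumerate(const std::function<int(int)>& next, int terminator,
                      const std::function<void(int)>& callBack)
{
    int value = 0;                  // starts out "undefined" in the real emitter
    while (true) {
        value = next(value);        // value = iterator.next(value)
        if (value == terminator)    // op_stricteq against the terminator sentinel
            break;
        callBack(value);            // run the loop body on the produced value
    }
}

int main()
{
    int state = 0;
    auto next = [&state](int) { return state < 3 ? ++state : -1; };
    enumerate(next, /*terminator*/ -1, [](int v) { std::cout << v << "\n"; }); // prints 1 2 3
}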
+
} // namespace JSC