/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) 2012 Igalia, S.L.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
#include "config.h"
#include "BytecodeGenerator.h"
-#include "BatchedTransitionOptimizer.h"
-#include "JSFunction.h"
#include "Interpreter.h"
-#include "ScopeChain.h"
-#include "UString.h"
+#include "JSActivation.h"
+#include "JSFunction.h"
+#include "JSNameScope.h"
+#include "LowLevelInterpreter.h"
+#include "JSCInlines.h"
+#include "Options.h"
+#include "StackAlignment.h"
+#include "StrongInlines.h"
+#include "UnlinkedCodeBlock.h"
+#include "UnlinkedInstructionStream.h"
+#include <wtf/StdLibExtras.h>
+#include <wtf/text/WTFString.h>
using namespace std;
namespace JSC {
-/*
- The layout of a register frame looks like this:
-
- For
-
- function f(x, y) {
- var v1;
- function g() { }
- var v2;
- return (x) * (y);
- }
-
- assuming (x) and (y) generated temporaries t1 and t2, you would have
-
- ------------------------------------
- | x | y | g | v2 | v1 | t1 | t2 | <-- value held
- ------------------------------------
- | -5 | -4 | -3 | -2 | -1 | +0 | +1 | <-- register index
- ------------------------------------
- | params->|<-locals | temps->
-
- Because temporary registers are allocated in a stack-like fashion, we
- can reclaim them with a simple popping algorithm. The same goes for labels.
- (We never reclaim parameter or local registers, because parameters and
- locals are DontDelete.)
-
- The register layout before a function call looks like this:
-
- For
-
- function f(x, y)
- {
- }
-
- f(1);
-
- > <------------------------------
- < > reserved: call frame | 1 | <-- value held
- > >snip< <------------------------------
- < > +0 | +1 | +2 | +3 | +4 | +5 | <-- register index
- > <------------------------------
- | params->|<-locals | temps->
-
- The call instruction fills in the "call frame" registers. It also pads
- missing arguments at the end of the call:
-
- > <-----------------------------------
- < > reserved: call frame | 1 | ? | <-- value held ("?" stands for "undefined")
- > >snip< <-----------------------------------
- < > +0 | +1 | +2 | +3 | +4 | +5 | +6 | <-- register index
- > <-----------------------------------
- | params->|<-locals | temps->
-
- After filling in missing arguments, the call instruction sets up the new
- stack frame to overlap the end of the old stack frame:
-
- |----------------------------------> <
- | reserved: call frame | 1 | ? < > <-- value held ("?" stands for "undefined")
- |----------------------------------> >snip< <
- | -7 | -6 | -5 | -4 | -3 | -2 | -1 < > <-- register index
- |----------------------------------> <
- | | params->|<-locals | temps->
-
- That way, arguments are "copied" into the callee's stack frame for free.
-
- If the caller supplies too many arguments, this trick doesn't work. The
- extra arguments protrude into space reserved for locals and temporaries.
- In that case, the call instruction makes a real copy of the call frame header,
- along with just the arguments expected by the callee, leaving the original
- call frame header and arguments behind. (The call instruction can't just discard
- extra arguments, because the "arguments" object may access them later.)
- This copying strategy ensures that all named values will be at the indices
- expected by the callee.
-*/
-
-#ifndef NDEBUG
-static bool s_dumpsGeneratedCode = false;
-#endif
-
-void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode)
-{
-#ifndef NDEBUG
- s_dumpsGeneratedCode = dumpsGeneratedCode;
-#else
- UNUSED_PARAM(dumpsGeneratedCode);
-#endif
-}
-
-bool BytecodeGenerator::dumpsGeneratedCode()
+void Label::setLocation(unsigned location)
{
-#ifndef NDEBUG
- return s_dumpsGeneratedCode;
-#else
- return false;
-#endif
+ m_location = location;
+
+ unsigned size = m_unresolvedJumps.size();
+ for (unsigned i = 0; i < size; ++i)
+ m_generator->m_instructions[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first;
}
-JSObject* BytecodeGenerator::generate()
+ParserError BytecodeGenerator::generate()
{
- m_codeBlock->setThisRegister(m_thisRegister.index());
+ SamplingRegion samplingRegion("Bytecode Generation");
+
+ m_codeBlock->setThisRegister(m_thisRegister.virtualRegister());
+ for (size_t i = 0; i < m_deconstructedParameters.size(); i++) {
+ auto& entry = m_deconstructedParameters[i];
+ entry.second->bindValue(*this, entry.first.get());
+ }
m_scopeNode->emitBytecode(*this);
-#ifndef NDEBUG
- m_codeBlock->setInstructionCount(m_codeBlock->instructions().size());
-
- if (s_dumpsGeneratedCode)
- m_codeBlock->dump(m_scopeChain->globalObject->globalExec());
-#endif
+ m_staticPropertyAnalyzer.kill();
- if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode)
- symbolTable().clear();
+ for (unsigned i = 0; i < m_tryRanges.size(); ++i) {
+ TryRange& range = m_tryRanges[i];
+ int start = range.start->bind();
+ int end = range.end->bind();
+
+ // This will happen for empty try blocks and for some cases of finally blocks:
+ //
+ // try {
+ // try {
+ // } finally {
+ // return 42;
+ // // *HERE*
+ // }
+ // } finally {
+ // print("things");
+ // }
+ //
+ // The return will pop scopes to execute the outer finally block. But this includes
+ // popping the try context for the inner try. The try context is live in the fall-through
+ // part of the finally block not because we will emit a handler that overlaps the finally,
+ // but because we haven't yet had a chance to plant the catch target. Then when we finish
+ // emitting code for the outer finally block, we repush the try context, this time with a
+ // new start index. But that means that the start index for the try range corresponding
+ // to the inner-finally-following-the-return (marked as "*HERE*" above) will be greater
+ // than the end index of the try block. This is harmless since end < start handlers will
+ // never get matched in our logic, but we do the runtime a favor and choose to not emit
+ // such handlers at all.
+ if (end <= start)
+ continue;
+
+ ASSERT(range.tryData->targetScopeDepth != UINT_MAX);
+ UnlinkedHandlerInfo info = {
+ static_cast<uint32_t>(start), static_cast<uint32_t>(end),
+ static_cast<uint32_t>(range.tryData->target->bind()),
+ range.tryData->targetScopeDepth
+ };
+ m_codeBlock->addExceptionHandler(info);
+ }
+
+ m_codeBlock->setInstructions(std::make_unique<UnlinkedInstructionStream>(m_instructions));
m_codeBlock->shrinkToFit();
+ if (m_codeBlock->symbolTable())
+ m_codeBlock->setSymbolTable(m_codeBlock->symbolTable()->cloneCapturedNames(*m_codeBlock->vm()));
+
if (m_expressionTooDeep)
- return createOutOfMemoryError(m_scopeChain->globalObject.get());
- return 0;
+ return ParserError(ParserError::OutOfMemory);
+ return ParserError(ParserError::ErrorNone);
}
-bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
+bool BytecodeGenerator::addVar(
+ const Identifier& ident, ConstantMode constantMode, WatchMode watchMode, RegisterID*& r0)
{
- int index = m_calleeRegisters.size();
- SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
- pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.impl(), newEntry);
+ ASSERT(static_cast<size_t>(m_codeBlock->m_numVars) == m_calleeRegisters.size());
+
+ ConcurrentJITLocker locker(symbolTable().m_lock);
+ int index = virtualRegisterForLocal(m_calleeRegisters.size()).offset();
+ SymbolTableEntry newEntry(index, constantMode == IsConstant ? ReadOnly : 0);
+ SymbolTable::Map::AddResult result = symbolTable().add(locker, ident.impl(), newEntry);
- if (!result.second) {
- r0 = ®isterFor(result.first->second.getIndex());
+ if (!result.isNewEntry) {
+ r0 = ®isterFor(result.iterator->value.getIndex());
return false;
}
-
+
+ if (watchMode == IsWatchable) {
+ while (m_watchableVariables.size() < static_cast<size_t>(m_codeBlock->m_numVars))
+ m_watchableVariables.append(Identifier());
+ m_watchableVariables.append(ident);
+ }
+
r0 = addVar();
+
+ ASSERT(watchMode == NotWatchable || static_cast<size_t>(m_codeBlock->m_numVars) == m_watchableVariables.size());
+
return true;
}
-bool BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
-{
- int index = m_nextGlobalIndex;
- SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
- pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.impl(), newEntry);
-
- if (!result.second)
- index = result.first->second.getIndex();
- else {
- --m_nextGlobalIndex;
- m_globals.append(index + m_globalVarStorageOffset);
- }
-
- r0 = ®isterFor(index);
- return result.second;
-}
-
void BytecodeGenerator::preserveLastVar()
{
if ((m_firstConstantIndex = m_calleeRegisters.size()) != 0)
m_lastVar = &m_calleeRegisters.last();
}
-BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, ScopeChainNode* scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
- , m_shouldEmitProfileHooks(scopeChain->globalObject->supportsProfiling())
- , m_shouldEmitRichSourceInfo(scopeChain->globalObject->supportsRichSourceInfo())
- , m_scopeChain(*scopeChain->globalData, scopeChain)
- , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_symbolTable(0)
, m_scopeNode(programNode)
- , m_codeBlock(codeBlock)
- , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
+ , m_codeBlock(vm, codeBlock)
+ , m_thisRegister(CallFrame::thisArgumentOffset())
+ , m_activationRegister(0)
+ , m_emptyValueRegister(0)
+ , m_globalObjectRegister(0)
, m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(0)
+ , m_localScopeDepth(0)
, m_codeType(GlobalCode)
- , m_nextGlobalIndex(-1)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_hasCreatedActivation(true)
, m_firstLazyFunction(0)
, m_lastLazyFunction(0)
- , m_globalData(scopeChain->globalData)
+ , m_staticPropertyAnalyzer(&m_instructions)
+ , m_vm(&vm)
, m_lastOpcodeID(op_end)
#ifndef NDEBUG
, m_lastOpcodePosition(0)
#endif
- , m_stack(m_globalData->stack())
, m_usesExceptions(false)
, m_expressionTooDeep(false)
+ , m_isBuiltinFunction(false)
{
- if (m_shouldEmitDebugHooks)
- m_codeBlock->setNeedsFullScopeChain(true);
+ m_codeBlock->setNumParameters(1); // Allocate space for "this"
emitOpcode(op_enter);
- codeBlock->setGlobalData(m_globalData);
-
- // FIXME: Move code that modifies the global object to Interpreter::execute.
-
- m_codeBlock->m_numParameters = 1; // Allocate space for "this"
-
- JSGlobalObject* globalObject = scopeChain->globalObject.get();
- ExecState* exec = globalObject->globalExec();
- RegisterFile* registerFile = &exec->globalData().interpreter->registerFile();
-
- // Shift register indexes in generated code to elide registers allocated by intermediate stack frames.
- m_globalVarStorageOffset = -RegisterFile::CallFrameHeaderSize - m_codeBlock->m_numParameters - registerFile->size();
-
- // Add previously defined symbols to bookkeeping.
- m_globals.grow(symbolTable->size());
- SymbolTable::iterator end = symbolTable->end();
- for (SymbolTable::iterator it = symbolTable->begin(); it != end; ++it)
- registerFor(it->second.getIndex()).setIndex(it->second.getIndex() + m_globalVarStorageOffset);
-
- BatchedTransitionOptimizer optimizer(*m_globalData, globalObject);
const VarStack& varStack = programNode->varStack();
const FunctionStack& functionStack = programNode->functionStack();
- bool canOptimizeNewGlobals = symbolTable->size() + functionStack.size() + varStack.size() < registerFile->maxGlobals();
- if (canOptimizeNewGlobals) {
- // Shift new symbols so they get stored prior to existing symbols.
- m_nextGlobalIndex -= symbolTable->size();
- HashSet<StringImpl*, IdentifierRepHash> newGlobals;
- Vector<std::pair<int, bool>, 16> functionInfo(functionStack.size());
- for (size_t i = 0; i < functionStack.size(); ++i) {
- FunctionBodyNode* function = functionStack[i];
- globalObject->removeDirect(*m_globalData, function->ident()); // Make sure our new function is not shadowed by an old property.
- SymbolTableEntry entry = symbolTable->inlineGet(function->ident().impl());
-
- if (entry.isNull())
- newGlobals.add(function->ident().impl());
- functionInfo[i] = make_pair(entry.getIndex(), entry.isReadOnly());
- }
-
- Vector<bool, 16> shouldCreateVar(varStack.size());
- for (size_t i = 0; i < varStack.size(); ++i) {
- if (newGlobals.contains(varStack[i].first->impl()) || globalObject->hasProperty(exec, *varStack[i].first)) {
- shouldCreateVar[i] = false;
- continue;
- }
- shouldCreateVar[i] = true;
- newGlobals.add(varStack[i].first->impl());
- }
-
- int expectedSize = symbolTable->size() + newGlobals.size();
- globalObject->resizeRegisters(symbolTable->size(), expectedSize);
-
- for (size_t i = 0; i < functionStack.size(); ++i) {
- FunctionBodyNode* function = functionStack[i];
- if (functionInfo[i].second)
- continue;
- RegisterID* dst = addGlobalVar(function->ident(), false);
- JSValue value = new (exec) JSFunction(exec, makeFunction(exec, function), scopeChain);
- globalObject->registerAt(dst->index() - m_globalVarStorageOffset).set(*m_globalData, globalObject, value);
- }
+ for (size_t i = 0; i < functionStack.size(); ++i) {
+ FunctionBodyNode* function = functionStack[i];
+ UnlinkedFunctionExecutable* unlinkedFunction = makeFunction(function);
+ codeBlock->addFunctionDeclaration(*m_vm, function->ident(), unlinkedFunction);
+ }
- for (size_t i = 0; i < varStack.size(); ++i) {
- if (!shouldCreateVar[i])
- continue;
- addGlobalVar(*varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant);
- }
- if (symbolTable->size() != expectedSize)
- CRASH();
+ for (size_t i = 0; i < varStack.size(); ++i)
+ codeBlock->addVariableDeclaration(varStack[i].first, !!(varStack[i].second & DeclarationStacks::IsConstant));
- preserveLastVar();
- } else {
- for (size_t i = 0; i < functionStack.size(); ++i) {
- FunctionBodyNode* function = functionStack[i];
- globalObject->putWithAttributes(exec, function->ident(), new (exec) JSFunction(exec, makeFunction(exec, function), scopeChain), DontDelete);
- }
- for (size_t i = 0; i < varStack.size(); ++i) {
- if (globalObject->symbolTableHasProperty(*varStack[i].first) || globalObject->hasProperty(exec, *varStack[i].first))
- continue;
- int attributes = DontDelete;
- if (varStack[i].second & DeclarationStacks::IsConstant)
- attributes |= ReadOnly;
- globalObject->putWithAttributes(exec, *varStack[i].first, jsUndefined(), attributes);
- }
-
- preserveLastVar();
- }
- codeBlock->m_numCapturedVars = codeBlock->m_numVars;
}
-BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainNode* scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
- , m_shouldEmitProfileHooks(scopeChain->globalObject->supportsProfiling())
- , m_shouldEmitRichSourceInfo(scopeChain->globalObject->supportsRichSourceInfo())
- , m_scopeChain(*scopeChain->globalData, scopeChain)
- , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_symbolTable(codeBlock->symbolTable())
, m_scopeNode(functionBody)
- , m_codeBlock(codeBlock)
+ , m_codeBlock(vm, codeBlock)
, m_activationRegister(0)
+ , m_emptyValueRegister(0)
+ , m_globalObjectRegister(0)
, m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(0)
+ , m_localScopeDepth(0)
, m_codeType(FunctionCode)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_hasCreatedActivation(false)
, m_firstLazyFunction(0)
, m_lastLazyFunction(0)
- , m_globalData(scopeChain->globalData)
+ , m_staticPropertyAnalyzer(&m_instructions)
+ , m_vm(&vm)
, m_lastOpcodeID(op_end)
#ifndef NDEBUG
, m_lastOpcodePosition(0)
#endif
- , m_stack(m_globalData->stack())
, m_usesExceptions(false)
, m_expressionTooDeep(false)
+ , m_isBuiltinFunction(codeBlock->isBuiltinFunction())
{
- if (m_shouldEmitDebugHooks)
- m_codeBlock->setNeedsFullScopeChain(true);
+ if (m_isBuiltinFunction)
+ m_shouldEmitDebugHooks = false;
+
+ m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
+ Vector<Identifier> boundParameterProperties;
+ FunctionParameters& parameters = *functionBody->parameters();
+ for (size_t i = 0; i < parameters.size(); i++) {
+ auto pattern = parameters.at(i);
+ if (pattern->isBindingNode())
+ continue;
+ pattern->collectBoundIdentifiers(boundParameterProperties);
+ continue;
+ }
+ m_symbolTable->setParameterCountIncludingThis(functionBody->parameters()->size() + 1);
- codeBlock->setGlobalData(m_globalData);
-
emitOpcode(op_enter);
- if (m_codeBlock->needsFullScopeChain()) {
+ if (m_codeBlock->needsFullScopeChain() || m_shouldEmitDebugHooks) {
m_activationRegister = addVar();
emitInitLazyRegister(m_activationRegister);
- m_codeBlock->setActivationRegister(m_activationRegister->index());
+ m_codeBlock->setActivationRegister(m_activationRegister->virtualRegister());
}
- // Both op_tear_off_activation and op_tear_off_arguments tear off the 'arguments'
- // object, if created.
- if (m_codeBlock->needsFullScopeChain() || functionBody->usesArguments()) {
+ m_symbolTable->setCaptureStart(virtualRegisterForLocal(m_codeBlock->m_numVars).offset());
+
+ if (functionBody->usesArguments() || codeBlock->usesEval()) { // May reify arguments object.
RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code.
- RegisterID* argumentsRegister = addVar(propertyNames().arguments, false); // Can be changed by assigning to 'arguments'.
+ RegisterID* argumentsRegister = addVar(propertyNames().arguments, IsVariable, NotWatchable); // Can be changed by assigning to 'arguments'.
// We can save a little space by hard-coding the knowledge that the two
// 'arguments' values are stored in consecutive registers, and storing
// only the index of the assignable one.
- codeBlock->setArgumentsRegister(argumentsRegister->index());
- ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->index() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister()));
+ codeBlock->setArgumentsRegister(argumentsRegister->virtualRegister());
+ ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->virtualRegister() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister()));
emitInitLazyRegister(argumentsRegister);
emitInitLazyRegister(unmodifiedArgumentsRegister);
- if (m_codeBlock->isStrictMode()) {
+ if (shouldTearOffArgumentsEagerly()) {
emitOpcode(op_create_arguments);
instructions().append(argumentsRegister->index());
}
+ }
- // The debugger currently retrieves the arguments object from an activation rather than pulling
- // it from a call frame. In the long-term it should stop doing that (<rdar://problem/6911886>),
- // but for now we force eager creation of the arguments object when debugging.
- if (m_shouldEmitDebugHooks) {
- emitOpcode(op_create_arguments);
- instructions().append(argumentsRegister->index());
+ bool shouldCaptureAllTheThings = m_shouldEmitDebugHooks || codeBlock->usesEval();
+
+ bool capturesAnyArgumentByName = false;
+ Vector<RegisterID*, 0, UnsafeVectorOverflow> capturedArguments;
+ if (functionBody->hasCapturedVariables() || shouldCaptureAllTheThings) {
+ FunctionParameters& parameters = *functionBody->parameters();
+ capturedArguments.resize(parameters.size());
+ for (size_t i = 0; i < parameters.size(); ++i) {
+ capturedArguments[i] = 0;
+ auto pattern = parameters.at(i);
+ if (!pattern->isBindingNode())
+ continue;
+ const Identifier& ident = static_cast<const BindingNode*>(pattern)->boundProperty();
+ if (!functionBody->captures(ident) && !shouldCaptureAllTheThings)
+ continue;
+ capturesAnyArgumentByName = true;
+ capturedArguments[i] = addVar();
+ }
+ }
+
+ if (capturesAnyArgumentByName && !shouldTearOffArgumentsEagerly()) {
+ size_t parameterCount = m_symbolTable->parameterCount();
+ auto slowArguments = std::make_unique<SlowArgument[]>(parameterCount);
+ for (size_t i = 0; i < parameterCount; ++i) {
+ if (!capturedArguments[i]) {
+ ASSERT(slowArguments[i].status == SlowArgument::Normal);
+ slowArguments[i].index = CallFrame::argumentOffset(i);
+ continue;
+ }
+ slowArguments[i].status = SlowArgument::Captured;
+ slowArguments[i].index = capturedArguments[i]->index();
}
+ m_symbolTable->setSlowArguments(WTF::move(slowArguments));
}
+ RegisterID* calleeRegister = resolveCallee(functionBody); // May push to the scope chain and/or add a captured var.
+
const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack();
const DeclarationStacks::VarStack& varStack = functionBody->varStack();
+ IdentifierSet test;
// Captured variables and functions go first so that activations don't have
// to step over the non-captured locals to mark them.
- m_hasCreatedActivation = false;
if (functionBody->hasCapturedVariables()) {
+ for (size_t i = 0; i < boundParameterProperties.size(); i++) {
+ const Identifier& ident = boundParameterProperties[i];
+ if (functionBody->captures(ident))
+ addVar(ident, IsVariable, IsWatchable);
+ }
for (size_t i = 0; i < functionStack.size(); ++i) {
FunctionBodyNode* function = functionStack[i];
const Identifier& ident = function->ident();
if (functionBody->captures(ident)) {
- if (!m_hasCreatedActivation) {
- m_hasCreatedActivation = true;
- emitOpcode(op_create_activation);
- instructions().append(m_activationRegister->index());
- }
m_functions.add(ident.impl());
- emitNewFunction(addVar(ident, false), function);
+ emitNewFunction(addVar(ident, IsVariable, IsWatchable), IsCaptured, function);
}
}
for (size_t i = 0; i < varStack.size(); ++i) {
- const Identifier& ident = *varStack[i].first;
+ const Identifier& ident = varStack[i].first;
if (functionBody->captures(ident))
- addVar(ident, varStack[i].second & DeclarationStacks::IsConstant);
+ addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, IsWatchable);
}
}
- bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks;
- if (!canLazilyCreateFunctions && !m_hasCreatedActivation) {
- m_hasCreatedActivation = true;
- emitOpcode(op_create_activation);
- instructions().append(m_activationRegister->index());
- }
- codeBlock->m_numCapturedVars = codeBlock->m_numVars;
+ m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset());
+
+ bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks;
m_firstLazyFunction = codeBlock->m_numVars;
for (size_t i = 0; i < functionStack.size(); ++i) {
FunctionBodyNode* function = functionStack[i];
const Identifier& ident = function->ident();
if (!functionBody->captures(ident)) {
m_functions.add(ident.impl());
- RefPtr<RegisterID> reg = addVar(ident, false);
+ RefPtr<RegisterID> reg = addVar(ident, IsVariable, NotWatchable);
// Don't lazily create functions that override the name 'arguments'
// as this would complicate lazy instantiation of actual arguments.
if (!canLazilyCreateFunctions || ident == propertyNames().arguments)
- emitNewFunction(reg.get(), function);
+ emitNewFunction(reg.get(), NotCaptured, function);
else {
emitInitLazyRegister(reg.get());
- m_lazyFunctions.set(reg->index(), function);
+ m_lazyFunctions.set(reg->virtualRegister().toLocal(), function);
}
}
}
m_lastLazyFunction = canLazilyCreateFunctions ? codeBlock->m_numVars : m_firstLazyFunction;
+ for (size_t i = 0; i < boundParameterProperties.size(); i++) {
+ const Identifier& ident = boundParameterProperties[i];
+ if (!functionBody->captures(ident))
+ addVar(ident, IsVariable, IsWatchable);
+ }
for (size_t i = 0; i < varStack.size(); ++i) {
- const Identifier& ident = *varStack[i].first;
+ const Identifier& ident = varStack[i].first;
if (!functionBody->captures(ident))
- addVar(ident, varStack[i].second & DeclarationStacks::IsConstant);
+ addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, NotWatchable);
}
-
- if (m_shouldEmitDebugHooks)
- codeBlock->m_numCapturedVars = codeBlock->m_numVars;
- FunctionParameters& parameters = *functionBody->parameters();
- size_t parameterCount = parameters.size();
- int nextParameterIndex = -RegisterFile::CallFrameHeaderSize - parameterCount - 1;
- m_parameters.grow(1 + parameterCount); // reserve space for "this"
+ if (shouldCaptureAllTheThings)
+ m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset());
- // Add "this" as a parameter
- m_thisRegister.setIndex(nextParameterIndex);
- ++m_codeBlock->m_numParameters;
+ if (m_symbolTable->captureCount())
+ emitOpcode(op_touch_entry);
- for (size_t i = 0; i < parameterCount; ++i)
- addParameter(parameters[i], ++nextParameterIndex);
+ m_parameters.grow(parameters.size() + 1); // reserve space for "this"
+ // Add "this" as a parameter
+ int nextParameterIndex = CallFrame::thisArgumentOffset();
+ m_thisRegister.setIndex(nextParameterIndex++);
+ m_codeBlock->addParameter();
+ for (size_t i = 0; i < parameters.size(); ++i, ++nextParameterIndex) {
+ int index = nextParameterIndex;
+ auto pattern = parameters.at(i);
+ if (!pattern->isBindingNode()) {
+ m_codeBlock->addParameter();
+ RegisterID& parameter = registerFor(index);
+ parameter.setIndex(index);
+ m_deconstructedParameters.append(std::make_pair(¶meter, pattern));
+ continue;
+ }
+ auto simpleParameter = static_cast<const BindingNode*>(pattern);
+ if (capturedArguments.size() && capturedArguments[i]) {
+ ASSERT((functionBody->hasCapturedVariables() && functionBody->captures(simpleParameter->boundProperty())) || shouldCaptureAllTheThings);
+ index = capturedArguments[i]->index();
+ RegisterID original(nextParameterIndex);
+ emitMove(capturedArguments[i], &original);
+ }
+ addParameter(simpleParameter->boundProperty(), index);
+ }
preserveLastVar();
- if (isConstructor()) {
- RefPtr<RegisterID> func = newTemporary();
- RefPtr<RegisterID> funcProto = newTemporary();
-
- emitOpcode(op_get_callee);
- instructions().append(func->index());
- // Load prototype.
- emitGetById(funcProto.get(), func.get(), globalData()->propertyNames->prototype);
+ // We declare the callee's name last because it should lose to a var, function, and/or parameter declaration.
+ addCallee(functionBody, calleeRegister);
- emitOpcode(op_create_this);
- instructions().append(m_thisRegister.index());
- instructions().append(funcProto->index());
- } else if (functionBody->usesThis() || m_shouldEmitDebugHooks) {
- if (codeBlock->isStrictMode())
- emitOpcode(op_convert_this_strict);
- else
- emitOpcode(op_convert_this);
- instructions().append(m_thisRegister.index());
+ if (isConstructor()) {
+ emitCreateThis(&m_thisRegister);
+ } else if (functionBody->usesThis() || codeBlock->usesEval()) {
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+ emitOpcode(op_to_this);
+ instructions().append(kill(&m_thisRegister));
+ instructions().append(0);
}
}
-BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, ScopeChainNode* scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
- , m_shouldEmitProfileHooks(scopeChain->globalObject->supportsProfiling())
- , m_shouldEmitRichSourceInfo(scopeChain->globalObject->supportsRichSourceInfo())
- , m_scopeChain(*scopeChain->globalData, scopeChain)
- , m_symbolTable(symbolTable)
+BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
+ : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+ , m_shouldEmitProfileHooks(Options::forceProfilerBytecodeGeneration() || profilerMode == ProfilerOn)
+ , m_symbolTable(codeBlock->symbolTable())
, m_scopeNode(evalNode)
- , m_codeBlock(codeBlock)
- , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
+ , m_codeBlock(vm, codeBlock)
+ , m_thisRegister(CallFrame::thisArgumentOffset())
+ , m_activationRegister(0)
+ , m_emptyValueRegister(0)
+ , m_globalObjectRegister(0)
, m_finallyDepth(0)
- , m_dynamicScopeDepth(0)
- , m_baseScopeDepth(codeBlock->baseScopeDepth())
+ , m_localScopeDepth(0)
, m_codeType(EvalCode)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_hasCreatedActivation(true)
, m_firstLazyFunction(0)
, m_lastLazyFunction(0)
- , m_globalData(scopeChain->globalData)
+ , m_staticPropertyAnalyzer(&m_instructions)
+ , m_vm(&vm)
, m_lastOpcodeID(op_end)
#ifndef NDEBUG
, m_lastOpcodePosition(0)
#endif
- , m_stack(m_globalData->stack())
, m_usesExceptions(false)
, m_expressionTooDeep(false)
+ , m_isBuiltinFunction(false)
{
- if (m_shouldEmitDebugHooks || m_baseScopeDepth)
- m_codeBlock->setNeedsFullScopeChain(true);
+ m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
+ m_codeBlock->setNumParameters(1);
emitOpcode(op_enter);
- codeBlock->setGlobalData(m_globalData);
- m_codeBlock->m_numParameters = 1; // Allocate space for "this"
const DeclarationStacks::FunctionStack& functionStack = evalNode->functionStack();
for (size_t i = 0; i < functionStack.size(); ++i)
- m_codeBlock->addFunctionDecl(makeFunction(m_globalData, functionStack[i]));
+ m_codeBlock->addFunctionDecl(makeFunction(functionStack[i]));
const DeclarationStacks::VarStack& varStack = evalNode->varStack();
unsigned numVariables = varStack.size();
- Vector<Identifier> variables;
+ Vector<Identifier, 0, UnsafeVectorOverflow> variables;
variables.reserveCapacity(numVariables);
- for (size_t i = 0; i < numVariables; ++i)
- variables.append(*varStack[i].first);
+ for (size_t i = 0; i < numVariables; ++i) {
+ ASSERT(varStack[i].first.impl()->isAtomic());
+ variables.append(varStack[i].first);
+ }
codeBlock->adoptVariables(variables);
- codeBlock->m_numCapturedVars = codeBlock->m_numVars;
preserveLastVar();
}
+// Out-of-line (empty) destructor. Presumably defined here rather than defaulted
+// in the header so that members whose types are only forward-declared in
+// BytecodeGenerator.h can be destroyed at this point of definition --
+// TODO(review): confirm against the header.
+BytecodeGenerator::~BytecodeGenerator()
+{
+}
+
RegisterID* BytecodeGenerator::emitInitLazyRegister(RegisterID* reg)
{
emitOpcode(op_init_lazy_reg);
instructions().append(reg->index());
+ ASSERT(!hasWatchableVariable(reg->index()));
return reg;
}
+// Determines how the function's own name can be referenced from inside its
+// body, returning the register holding the callee, or 0 when no compile-time
+// callee binding is needed or possible.
+RegisterID* BytecodeGenerator::resolveCallee(FunctionBodyNode* functionBodyNode)
+{
+ // No binding is needed when the function's name is not in scope inside the
+ // body (per functionNameIsInScope for this function mode).
+ if (!functionNameIsInScope(functionBodyNode->ident(), functionBodyNode->functionMode()))
+ return 0;
+
+ // If the name scope is dynamic (predicate depends on eval use and strict
+ // mode -- see functionNameScopeIsDynamic), the name must be resolved at
+ // runtime instead of being bound to a register here.
+ if (functionNameScopeIsDynamic(m_codeBlock->usesEval(), m_codeBlock->isStrictMode()))
+ return 0;
+
+ // Bind the callee register to the fixed Callee slot of the call frame.
+ m_calleeRegister.setIndex(JSStack::Callee);
+ // If the body captures its own name, copy the callee into a fresh captured
+ // variable so the binding lives in the activation.
+ if (functionBodyNode->captures(functionBodyNode->ident()))
+ return emitMove(addVar(), IsCaptured, &m_calleeRegister);
+
+ return &m_calleeRegister;
+}
+
+// Declares the function's own name in the symbol table as a read-only entry
+// bound to calleeRegister. A null calleeRegister (from resolveCallee) means
+// no binding is wanted, so this is a no-op in that case.
+void BytecodeGenerator::addCallee(FunctionBodyNode* functionBodyNode, RegisterID* calleeRegister)
+{
+ if (!calleeRegister)
+ return;
+
+ // ReadOnly: the callee name binding cannot be reassigned by user code.
+ symbolTable().add(functionBodyNode->ident().impl(), SymbolTableEntry(calleeRegister->index(), ReadOnly));
+}
+
void BytecodeGenerator::addParameter(const Identifier& ident, int parameterIndex)
{
// Parameters overwrite var declarations, but not function declarations.
// To maintain the calling convention, we have to allocate unique space for
// each parameter, even if the parameter doesn't make it into the symbol table.
- ++m_codeBlock->m_numParameters;
-}
-
-RegisterID* BytecodeGenerator::registerFor(const Identifier& ident)
-{
- if (ident == propertyNames().thisIdentifier)
- return &m_thisRegister;
-
- if (!shouldOptimizeLocals())
- return 0;
-
- SymbolTableEntry entry = symbolTable().get(ident.impl());
- if (entry.isNull())
- return 0;
-
- if (ident == propertyNames().arguments)
- createArgumentsIfNecessary();
-
- return createLazyRegisterIfNecessary(®isterFor(entry.getIndex()));
+ m_codeBlock->addParameter();
}
bool BytecodeGenerator::willResolveToArguments(const Identifier& ident)
SymbolTableEntry entry = symbolTable().get(ident.impl());
if (entry.isNull())
return false;
-
+
if (m_codeBlock->usesArguments() && m_codeType == FunctionCode)
return true;
RegisterID* BytecodeGenerator::createLazyRegisterIfNecessary(RegisterID* reg)
{
- if (m_lastLazyFunction <= reg->index() || reg->index() < m_firstLazyFunction)
+ if (!reg->virtualRegister().isLocal())
return reg;
- emitLazyNewFunction(reg, m_lazyFunctions.get(reg->index()));
- return reg;
-}
-RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
-{
- if (m_codeType == EvalCode)
- return 0;
-
- SymbolTableEntry entry = symbolTable().get(ident.impl());
- if (entry.isNull())
- return 0;
+ int localVariableNumber = reg->virtualRegister().toLocal();
- return createLazyRegisterIfNecessary(®isterFor(entry.getIndex()));
-}
-
-bool BytecodeGenerator::isLocal(const Identifier& ident)
-{
- if (ident == propertyNames().thisIdentifier)
- return true;
-
- return shouldOptimizeLocals() && symbolTable().contains(ident.impl());
-}
-
-bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
-{
- return symbolTable().get(ident.impl()).isReadOnly();
+ if (m_lastLazyFunction <= localVariableNumber || localVariableNumber < m_firstLazyFunction)
+ return reg;
+ emitLazyNewFunction(reg, m_lazyFunctions.get(localVariableNumber));
+ return reg;
}
RegisterID* BytecodeGenerator::newRegister()
{
- m_calleeRegisters.append(m_calleeRegisters.size());
- m_codeBlock->m_numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+ m_calleeRegisters.append(virtualRegisterForLocal(m_calleeRegisters.size()));
+ int numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+ numCalleeRegisters = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numCalleeRegisters);
+ m_codeBlock->m_numCalleeRegisters = numCalleeRegisters;
return &m_calleeRegisters.last();
}
return result;
}
-RegisterID* BytecodeGenerator::highestUsedRegister()
-{
- size_t count = m_codeBlock->m_numCalleeRegisters;
- while (m_calleeRegisters.size() < count)
- newRegister();
- return &m_calleeRegisters.last();
-}
-
-PassRefPtr<LabelScope> BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
+LabelScopePtr BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
{
// Reclaim free label scopes.
while (m_labelScopes.size() && !m_labelScopes.last().refCount())
// Allocate new label scope.
LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : PassRefPtr<Label>()); // Only loops have continue targets.
m_labelScopes.append(scope);
- return &m_labelScopes.last();
+ return LabelScopePtr(m_labelScopes, m_labelScopes.size() - 1);
}
PassRefPtr<Label> BytecodeGenerator::newLabel()
m_labels.removeLast();
// Allocate new label ID.
- m_labels.append(m_codeBlock);
+ m_labels.append(this);
return &m_labels.last();
}
ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end);
m_lastOpcodePosition = opcodePosition;
#endif
- instructions().append(globalData()->interpreter->getOpcode(opcodeID));
+ instructions().append(opcodeID);
m_lastOpcodeID = opcodeID;
}
+UnlinkedArrayProfile BytecodeGenerator::newArrayProfile()
+{
+ return m_codeBlock->addArrayProfile();
+}
+
+UnlinkedArrayAllocationProfile BytecodeGenerator::newArrayAllocationProfile()
+{
+ return m_codeBlock->addArrayAllocationProfile();
+}
+
+UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile()
+{
+ return m_codeBlock->addObjectAllocationProfile();
+}
+
+UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)
+{
+ UnlinkedValueProfile result = m_codeBlock->addValueProfile();
+ emitOpcode(opcodeID);
+ return result;
+}
+
+void BytecodeGenerator::emitLoopHint()
+{
+ emitOpcode(op_loop_hint);
+}
+
void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index)
{
ASSERT(instructions().size() >= 4);
PassRefPtr<Label> BytecodeGenerator::emitJump(Label* target)
{
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jmp : op_loop);
+ emitOpcode(op_jmp);
instructions().append(target->bind(begin, instructions().size()));
return target;
}
rewindBinaryOp();
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jless : op_loop_if_less);
+ emitOpcode(op_jless);
instructions().append(src1Index);
instructions().append(src2Index);
instructions().append(target->bind(begin, instructions().size()));
rewindBinaryOp();
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jlesseq : op_loop_if_lesseq);
+ emitOpcode(op_jlesseq);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_greater) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jgreater);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_greatereq) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jgreatereq);
instructions().append(src1Index);
instructions().append(src2Index);
instructions().append(target->bind(begin, instructions().size()));
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
+ emitOpcode(op_jtrue);
instructions().append(cond->index());
instructions().append(target->bind(begin, instructions().size()));
return target;
instructions().append(target->bind(begin, instructions().size()));
return target;
}
+ } else if (m_lastOpcodeID == op_greater && target->isForward()) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jngreater);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_greatereq && target->isForward()) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jngreatereq);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
} else if (m_lastOpcodeID == op_not) {
int dstIndex;
int srcIndex;
rewindUnaryOp();
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
+ emitOpcode(op_jtrue);
instructions().append(srcIndex);
instructions().append(target->bind(begin, instructions().size()));
return target;
}
size_t begin = instructions().size();
- emitOpcode(target->isForward() ? op_jfalse : op_loop_if_false);
+ emitOpcode(op_jfalse);
instructions().append(cond->index());
instructions().append(target->bind(begin, instructions().size()));
return target;
emitOpcode(op_jneq_ptr);
instructions().append(cond->index());
- instructions().append(Instruction(*m_globalData, m_codeBlock->ownerExecutable(), m_scopeChain->globalObject->callFunction()));
+ instructions().append(Special::CallFunction);
instructions().append(target->bind(begin, instructions().size()));
return target;
}
emitOpcode(op_jneq_ptr);
instructions().append(cond->index());
- instructions().append(Instruction(*m_globalData, m_codeBlock->ownerExecutable(), m_scopeChain->globalObject->applyFunction()));
+ instructions().append(Special::ApplyFunction);
instructions().append(target->bind(begin, instructions().size()));
return target;
}
unsigned BytecodeGenerator::addConstant(const Identifier& ident)
{
StringImpl* rep = ident.impl();
- pair<IdentifierMap::iterator, bool> result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
- if (result.second) // new entry
- m_codeBlock->addIdentifier(Identifier(m_globalData, rep));
+ IdentifierMap::AddResult result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
+ if (result.isNewEntry)
+ m_codeBlock->addIdentifier(ident);
+
+ return result.iterator->value;
+}
+
+// We can't hash JSValue(), so we use a dedicated data member to cache it.
+RegisterID* BytecodeGenerator::addConstantEmptyValue()
+{
+ if (!m_emptyValueRegister) {
+ int index = m_nextConstantOffset;
+ m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+ ++m_nextConstantOffset;
+ m_codeBlock->addConstant(JSValue());
+ m_emptyValueRegister = &m_constantPoolRegisters[index];
+ }
- return result.first->second;
+ return m_emptyValueRegister;
}
RegisterID* BytecodeGenerator::addConstantValue(JSValue v)
{
- int index = m_nextConstantOffset;
+ if (!v)
+ return addConstantEmptyValue();
- pair<JSValueMap::iterator, bool> result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset);
- if (result.second) {
+ int index = m_nextConstantOffset;
+ JSValueMap::AddResult result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset);
+ if (result.isNewEntry) {
m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
++m_nextConstantOffset;
- m_codeBlock->addConstant(JSValue(v));
+ m_codeBlock->addConstant(v);
} else
- index = result.first->second;
-
+ index = result.iterator->value;
return &m_constantPoolRegisters[index];
}
return m_codeBlock->addRegExp(r);
}
-RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
+RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, CaptureMode captureMode, RegisterID* src)
{
- emitOpcode(op_mov);
+ m_staticPropertyAnalyzer.mov(dst->index(), src->index());
+
+ emitOpcode(captureMode == IsCaptured ? op_captured_mov : op_mov);
instructions().append(dst->index());
instructions().append(src->index());
+ if (captureMode == IsCaptured)
+ instructions().append(watchableVariable(dst->index()));
return dst;
}
+RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
+{
+ return emitMove(dst, captureMode(dst->index()), src);
+}
+
RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src)
{
emitOpcode(opcodeID);
return dst;
}
-RegisterID* BytecodeGenerator::emitPreInc(RegisterID* srcDst)
+RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst)
{
- emitOpcode(op_pre_inc);
+ emitOpcode(op_inc);
instructions().append(srcDst->index());
return srcDst;
}
-RegisterID* BytecodeGenerator::emitPreDec(RegisterID* srcDst)
+RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst)
{
- emitOpcode(op_pre_dec);
+ emitOpcode(op_dec);
instructions().append(srcDst->index());
return srcDst;
}
-RegisterID* BytecodeGenerator::emitPostInc(RegisterID* dst, RegisterID* srcDst)
-{
- emitOpcode(op_post_inc);
- instructions().append(dst->index());
- instructions().append(srcDst->index());
- return dst;
-}
-
-RegisterID* BytecodeGenerator::emitPostDec(RegisterID* dst, RegisterID* srcDst)
-{
- emitOpcode(op_post_dec);
- instructions().append(dst->index());
- instructions().append(srcDst->index());
- return dst;
-}
-
RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types)
{
emitOpcode(opcodeID);
&& src1->isTemporary()
&& m_codeBlock->isConstantRegisterIndex(src2->index())
&& m_codeBlock->constantRegister(src2->index()).get().isString()) {
- const UString& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue();
+ const String& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue();
if (value == "undefined") {
rewindUnaryOp();
emitOpcode(op_is_undefined);
// FIXME: Our hash tables won't hold infinity, so we make a new JSValue each time.
// Later we can do the extra work to handle that like the other cases. They also don't
// work correctly with NaN as a key.
- if (isnan(number) || number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
+ if (std::isnan(number) || number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
return emitLoad(dst, jsNumber(number));
- JSValue& valueInMap = m_numberMap.add(number, JSValue()).first->second;
+ JSValue& valueInMap = m_numberMap.add(number, JSValue()).iterator->value;
if (!valueInMap)
valueInMap = jsNumber(number);
return emitLoad(dst, valueInMap);
RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, const Identifier& identifier)
{
- JSString*& stringInMap = m_stringMap.add(identifier.impl(), 0).first->second;
+ JSString*& stringInMap = m_stringMap.add(identifier.impl(), nullptr).iterator->value;
if (!stringInMap)
- stringInMap = jsOwnedString(globalData(), identifier.ustring());
+ stringInMap = jsOwnedString(vm(), identifier.string());
return emitLoad(dst, JSValue(stringInMap));
}
return constantID;
}
-bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& index, size_t& stackDepth, bool forWriting, bool& requiresDynamicChecks, JSObject*& globalObject)
+RegisterID* BytecodeGenerator::emitLoadGlobalObject(RegisterID* dst)
{
- // Cases where we cannot statically optimize the lookup.
- if (property == propertyNames().arguments || !canOptimizeNonLocals()) {
- stackDepth = 0;
- index = missingSymbolMarker();
-
- if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
- ScopeChainIterator iter = m_scopeChain->begin();
- globalObject = iter->get();
- ASSERT((++iter) == m_scopeChain->end());
- }
- return false;
+ if (!m_globalObjectRegister) {
+ int index = m_nextConstantOffset;
+ m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
+ ++m_nextConstantOffset;
+ m_codeBlock->addConstant(JSValue());
+ m_globalObjectRegister = &m_constantPoolRegisters[index];
+ m_codeBlock->setGlobalObjectRegister(VirtualRegister(index));
}
+ if (dst)
+ emitMove(dst, m_globalObjectRegister);
+ return m_globalObjectRegister;
+}
- size_t depth = 0;
- requiresDynamicChecks = false;
- ScopeChainIterator iter = m_scopeChain->begin();
- ScopeChainIterator end = m_scopeChain->end();
- for (; iter != end; ++iter, ++depth) {
- JSObject* currentScope = iter->get();
- if (!currentScope->isVariableObject())
- break;
- JSVariableObject* currentVariableObject = static_cast<JSVariableObject*>(currentScope);
- SymbolTableEntry entry = currentVariableObject->symbolTable().get(property.impl());
-
- // Found the property
- if (!entry.isNull()) {
- if (entry.isReadOnly() && forWriting) {
- stackDepth = 0;
- index = missingSymbolMarker();
- if (++iter == end)
- globalObject = currentVariableObject;
- return false;
- }
- stackDepth = depth + m_codeBlock->needsFullScopeChain();
- index = entry.getIndex();
- if (++iter == end)
- globalObject = currentVariableObject;
- return true;
- }
- bool scopeRequiresDynamicChecks = false;
- if (currentVariableObject->isDynamicScope(scopeRequiresDynamicChecks))
- break;
- requiresDynamicChecks |= scopeRequiresDynamicChecks;
- }
- // Can't locate the property but we're able to avoid a few lookups.
- stackDepth = depth + m_codeBlock->needsFullScopeChain();
- index = missingSymbolMarker();
- JSObject* scope = iter->get();
- if (++iter == end)
- globalObject = scope;
- return true;
+bool BytecodeGenerator::isCaptured(int operand)
+{
+ return m_symbolTable && m_symbolTable->isCaptured(operand);
}
-void BytecodeGenerator::emitCheckHasInstance(RegisterID* base)
-{
- emitOpcode(op_check_has_instance);
- instructions().append(base->index());
+Local BytecodeGenerator::local(const Identifier& property)
+{
+ if (property == propertyNames().thisIdentifier)
+ return Local(thisRegister(), ReadOnly, NotCaptured);
+
+ if (property == propertyNames().arguments)
+ createArgumentsIfNecessary();
+
+ if (!shouldOptimizeLocals())
+ return Local();
+
+ SymbolTableEntry entry = symbolTable().get(property.impl());
+ if (entry.isNull())
+ return Local();
+
+ RegisterID* local = createLazyRegisterIfNecessary(®isterFor(entry.getIndex()));
+ return Local(local, entry.getAttributes(), captureMode(local->index()));
}
-RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype)
-{
- emitOpcode(op_instanceof);
+Local BytecodeGenerator::constLocal(const Identifier& property)
+{
+ if (m_codeType != FunctionCode)
+ return Local();
+
+ SymbolTableEntry entry = symbolTable().get(property.impl());
+ if (entry.isNull())
+ return Local();
+
+ RegisterID* local = createLazyRegisterIfNecessary(®isterFor(entry.getIndex()));
+ return Local(local, entry.getAttributes(), captureMode(local->index()));
+}
+
+void BytecodeGenerator::emitCheckHasInstance(RegisterID* dst, RegisterID* value, RegisterID* base, Label* target)
+{
+ size_t begin = instructions().size();
+ emitOpcode(op_check_has_instance);
instructions().append(dst->index());
instructions().append(value->index());
instructions().append(base->index());
- instructions().append(basePrototype->index());
- return dst;
+ instructions().append(target->bind(begin, instructions().size()));
}
-static const unsigned maxGlobalResolves = 128;
-
-bool BytecodeGenerator::shouldAvoidResolveGlobal()
+// Indicates the least upper bound of resolve type based on local scope. The bytecode linker
+// will start with this ResolveType and compute the least upper bound including intercepting scopes.
+ResolveType BytecodeGenerator::resolveType()
{
- return m_codeBlock->globalResolveInfoCount() > maxGlobalResolves && !m_labelScopes.size();
+ if (m_localScopeDepth)
+ return Dynamic;
+ if (m_symbolTable && m_symbolTable->usesNonStrictEval())
+ return GlobalPropertyWithVarInjectionChecks;
+ return GlobalProperty;
}
-RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const Identifier& property)
+RegisterID* BytecodeGenerator::emitResolveScope(RegisterID* dst, const Identifier& identifier)
{
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- if (!findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject) && !globalObject) {
- // We can't optimise at all :-(
- emitOpcode(op_resolve);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- return dst;
- }
- if (shouldAvoidResolveGlobal()) {
- globalObject = 0;
- requiresDynamicChecks = true;
- }
-
- if (globalObject) {
- bool forceGlobalResolve = false;
-
- if (index != missingSymbolMarker() && !forceGlobalResolve && !requiresDynamicChecks) {
- // Directly index the property lookup across multiple scopes.
- return emitGetScopedVar(dst, depth, index, globalObject);
- }
-
-#if ENABLE(JIT)
- m_codeBlock->addGlobalResolveInfo(instructions().size());
-#endif
-#if ENABLE(INTERPRETER)
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
-#endif
- emitOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(0);
- instructions().append(0);
- if (requiresDynamicChecks)
- instructions().append(depth);
- return dst;
- }
-
- if (requiresDynamicChecks) {
- // If we get here we have eval nested inside a |with| just give up
- emitOpcode(op_resolve);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- return dst;
- }
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
- if (index != missingSymbolMarker()) {
- // Directly index the property lookup across multiple scopes.
- return emitGetScopedVar(dst, depth, index, globalObject);
- }
+ ASSERT(!m_symbolTable || !m_symbolTable->contains(identifier.impl()) || resolveType() == Dynamic);
- // In this case we are at least able to drop a few scope chains from the
- // lookup chain, although we still need to hash from then on.
- emitOpcode(op_resolve_skip);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(depth);
+ // resolve_scope dst, id, ResolveType, depth
+ emitOpcode(op_resolve_scope);
+ instructions().append(kill(dst));
+ instructions().append(addConstant(identifier));
+ instructions().append(resolveType());
+ instructions().append(0);
+ instructions().append(0);
return dst;
}
-RegisterID* BytecodeGenerator::emitGetScopedVar(RegisterID* dst, size_t depth, int index, JSValue globalObject)
+RegisterID* BytecodeGenerator::emitGetFromScope(RegisterID* dst, RegisterID* scope, const Identifier& identifier, ResolveMode resolveMode)
{
- if (globalObject) {
- emitOpcode(op_get_global_var);
- instructions().append(dst->index());
- instructions().append(index);
- return dst;
- }
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
- emitOpcode(op_get_scoped_var);
- instructions().append(dst->index());
- instructions().append(index);
- instructions().append(depth);
+ // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_scope);
+ instructions().append(kill(dst));
+ instructions().append(scope->index());
+ instructions().append(addConstant(identifier));
+ instructions().append(ResolveModeAndType(resolveMode, resolveType()).operand());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(profile);
return dst;
}
-RegisterID* BytecodeGenerator::emitPutScopedVar(size_t depth, int index, RegisterID* value, JSValue globalObject)
+RegisterID* BytecodeGenerator::emitPutToScope(RegisterID* scope, const Identifier& identifier, RegisterID* value, ResolveMode resolveMode)
{
- if (globalObject) {
- emitOpcode(op_put_global_var);
- instructions().append(index);
- instructions().append(value->index());
- return value;
- }
- emitOpcode(op_put_scoped_var);
- instructions().append(index);
- instructions().append(depth);
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+
+ // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+ emitOpcode(op_put_to_scope);
+ instructions().append(scope->index());
+ instructions().append(addConstant(identifier));
instructions().append(value->index());
+ instructions().append(ResolveModeAndType(resolveMode, resolveType()).operand());
+ instructions().append(0);
+ instructions().append(0);
return value;
}
-RegisterID* BytecodeGenerator::emitResolveBase(RegisterID* dst, const Identifier& property)
-{
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject);
- if (!globalObject || requiresDynamicChecks) {
- // We can't optimise at all :-(
- emitOpcode(op_resolve_base);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(false);
- return dst;
- }
-
- // Global object is the base
- return emitLoad(dst, JSValue(globalObject));
-}
-
-RegisterID* BytecodeGenerator::emitResolveBaseForPut(RegisterID* dst, const Identifier& property)
-{
- if (!m_codeBlock->isStrictMode())
- return emitResolveBase(dst, property);
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject);
- if (!globalObject || requiresDynamicChecks) {
- // We can't optimise at all :-(
- emitOpcode(op_resolve_base);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(true);
- return dst;
- }
-
- // Global object is the base
- RefPtr<RegisterID> result = emitLoad(dst, JSValue(globalObject));
- emitOpcode(op_ensure_property_exists);
+RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype)
+{
+ emitOpcode(op_instanceof);
instructions().append(dst->index());
- instructions().append(addConstant(property));
- return result.get();
-}
-
-RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
-{
- size_t depth = 0;
- int index = 0;
- JSObject* globalObject = 0;
- bool requiresDynamicChecks = false;
- if (!findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject) || !globalObject || requiresDynamicChecks) {
- // We can't optimise at all :-(
- emitOpcode(op_resolve_with_base);
- instructions().append(baseDst->index());
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- return baseDst;
- }
-
- bool forceGlobalResolve = false;
-
- // Global object is the base
- emitLoad(baseDst, JSValue(globalObject));
-
- if (index != missingSymbolMarker() && !forceGlobalResolve) {
- // Directly index the property lookup across multiple scopes.
- emitGetScopedVar(propDst, depth, index, globalObject);
- return baseDst;
- }
- if (shouldAvoidResolveGlobal()) {
- emitOpcode(op_resolve);
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- return baseDst;
- }
-#if ENABLE(JIT)
- m_codeBlock->addGlobalResolveInfo(instructions().size());
-#endif
-#if ENABLE(INTERPRETER)
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
-#endif
- emitOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- instructions().append(0);
- instructions().append(0);
- if (requiresDynamicChecks)
- instructions().append(depth);
- return baseDst;
+ instructions().append(value->index());
+ instructions().append(basePrototype->index());
+ return dst;
}
-void BytecodeGenerator::emitMethodCheck()
+RegisterID* BytecodeGenerator::emitInitGlobalConst(const Identifier& identifier, RegisterID* value)
{
- emitOpcode(op_method_check);
+ ASSERT(m_codeType == GlobalCode);
+ emitOpcode(op_init_global_const_nop);
+ instructions().append(0);
+ instructions().append(value->index());
+ instructions().append(0);
+ instructions().append(addConstant(identifier));
+ return value;
}
RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
{
-#if ENABLE(JIT)
- m_codeBlock->addStructureStubInfo(StructureStubInfo(access_get_by_id));
-#endif
-
-#if ENABLE(INTERPRETER)
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
- emitOpcode(op_get_by_id);
- instructions().append(dst->index());
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id);
+ instructions().append(kill(dst));
instructions().append(base->index());
instructions().append(addConstant(property));
instructions().append(0);
instructions().append(0);
instructions().append(0);
instructions().append(0);
+ instructions().append(profile);
return dst;
}
{
emitOpcode(op_get_arguments_length);
instructions().append(dst->index());
- ASSERT(base->index() == m_codeBlock->argumentsRegister());
+ ASSERT(base->virtualRegister() == m_codeBlock->argumentsRegister());
instructions().append(base->index());
instructions().append(addConstant(propertyNames().length));
return dst;
RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
-#if ENABLE(JIT)
- m_codeBlock->addStructureStubInfo(StructureStubInfo(access_put_by_id));
-#endif
-#if ENABLE(INTERPRETER)
+ unsigned propertyIndex = addConstant(property);
+
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
emitOpcode(op_put_by_id);
instructions().append(base->index());
- instructions().append(addConstant(property));
+ instructions().append(propertyIndex);
instructions().append(value->index());
instructions().append(0);
instructions().append(0);
RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
-#if ENABLE(JIT)
- m_codeBlock->addStructureStubInfo(StructureStubInfo(access_put_by_id));
-#endif
-#if ENABLE(INTERPRETER)
+ unsigned propertyIndex = addConstant(property);
+
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
emitOpcode(op_put_by_id);
instructions().append(base->index());
- instructions().append(addConstant(property));
+ instructions().append(propertyIndex);
instructions().append(value->index());
instructions().append(0);
instructions().append(0);
instructions().append(0);
instructions().append(0);
- instructions().append(property != m_globalData->propertyNames->underscoreProto);
+ instructions().append(
+ property != m_vm->propertyNames->underscoreProto
+ && PropertyName(property).asIndex() == PropertyName::NotAnIndex);
return value;
}
-RegisterID* BytecodeGenerator::emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value)
+void BytecodeGenerator::emitPutGetterSetter(RegisterID* base, const Identifier& property, RegisterID* getter, RegisterID* setter)
{
- emitOpcode(op_put_getter);
- instructions().append(base->index());
- instructions().append(addConstant(property));
- instructions().append(value->index());
- return value;
-}
+ unsigned propertyIndex = addConstant(property);
-RegisterID* BytecodeGenerator::emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value)
-{
- emitOpcode(op_put_setter);
+ m_staticPropertyAnalyzer.putById(base->index(), propertyIndex);
+
+ emitOpcode(op_put_getter_setter);
instructions().append(base->index());
- instructions().append(addConstant(property));
- instructions().append(value->index());
- return value;
+ instructions().append(propertyIndex);
+ instructions().append(getter->index());
+ instructions().append(setter->index());
}
RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier& property)
RegisterID* BytecodeGenerator::emitGetArgumentByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
{
- emitOpcode(op_get_argument_by_val);
- instructions().append(dst->index());
- ASSERT(base->index() == m_codeBlock->argumentsRegister());
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_argument_by_val);
+ instructions().append(kill(dst));
+ ASSERT(base->virtualRegister() == m_codeBlock->argumentsRegister());
instructions().append(base->index());
instructions().append(property->index());
+ instructions().append(arrayProfile);
+ instructions().append(profile);
return dst;
}
return dst;
}
}
- emitOpcode(op_get_by_val);
- instructions().append(dst->index());
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val);
+ instructions().append(kill(dst));
instructions().append(base->index());
instructions().append(property->index());
+ instructions().append(arrayProfile);
+ instructions().append(profile);
return dst;
}
RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
{
- emitOpcode(op_put_by_val);
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ if (m_isBuiltinFunction)
+ emitOpcode(op_put_by_val_direct);
+ else
+ emitOpcode(op_put_by_val);
+ instructions().append(base->index());
+ instructions().append(property->index());
+ instructions().append(value->index());
+ instructions().append(arrayProfile);
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitDirectPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
+{
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ emitOpcode(op_put_by_val_direct);
instructions().append(base->index());
instructions().append(property->index());
instructions().append(value->index());
+ instructions().append(arrayProfile);
return value;
}
return value;
}
+RegisterID* BytecodeGenerator::emitCreateThis(RegisterID* dst)
+{
+ RefPtr<RegisterID> func = newTemporary();
+
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+ emitOpcode(op_get_callee);
+ instructions().append(func->index());
+ instructions().append(0);
+
+ size_t begin = instructions().size();
+ m_staticPropertyAnalyzer.createThis(m_thisRegister.index(), begin + 3);
+
+ emitOpcode(op_create_this);
+ instructions().append(m_thisRegister.index());
+ instructions().append(func->index());
+ instructions().append(0);
+ return dst;
+}
+
RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst)
{
+ size_t begin = instructions().size();
+ m_staticPropertyAnalyzer.newObject(dst->index(), begin + 2);
+
emitOpcode(op_new_object);
instructions().append(dst->index());
+ instructions().append(0);
+ instructions().append(newObjectAllocationProfile());
return dst;
}
JSString* BytecodeGenerator::addStringConstant(const Identifier& identifier)
{
- JSString*& stringInMap = m_stringMap.add(identifier.impl(), 0).first->second;
+ JSString*& stringInMap = m_stringMap.add(identifier.impl(), nullptr).iterator->value;
if (!stringInMap) {
- stringInMap = jsString(globalData(), identifier.ustring());
+ stringInMap = jsString(vm(), identifier.string());
addConstantValue(stringInMap);
}
return stringInMap;
bool hadVariableExpression = false;
if (length) {
for (ElementNode* n = elements; n; n = n->next()) {
- if (!n->value()->isNumber() && !n->value()->isString()) {
+ if (!n->value()->isConstant()) {
hadVariableExpression = true;
break;
}
if (!hadVariableExpression) {
ASSERT(length == checkLength);
unsigned constantBufferIndex = addConstantBuffer(length);
- JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex);
+ JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex).data();
unsigned index = 0;
for (ElementNode* n = elements; index < length; n = n->next()) {
- if (n->value()->isNumber())
- constantBuffer[index++] = jsNumber(static_cast<NumberNode*>(n->value())->value());
- else {
- ASSERT(n->value()->isString());
- constantBuffer[index++] = addStringConstant(static_cast<StringNode*>(n->value())->value());
- }
+ ASSERT(n->value()->isConstant());
+ constantBuffer[index++] = static_cast<ConstantNode*>(n->value())->jsValue(*this);
}
emitOpcode(op_new_array_buffer);
instructions().append(dst->index());
instructions().append(constantBufferIndex);
instructions().append(length);
+ instructions().append(newArrayAllocationProfile());
return dst;
}
}
- Vector<RefPtr<RegisterID>, 16> argv;
+ Vector<RefPtr<RegisterID>, 16, UnsafeVectorOverflow> argv;
for (ElementNode* n = elements; n; n = n->next()) {
- if (n->elision())
+ if (!length)
break;
+ length--;
+ ASSERT(!n->value()->isSpreadExpression());
argv.append(newTemporary());
// op_new_array requires the initial values to be a sequential range of registers
- ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
+ ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() - 1);
emitNode(argv.last().get(), n->value());
}
+ ASSERT(!length);
emitOpcode(op_new_array);
instructions().append(dst->index());
instructions().append(argv.size() ? argv[0]->index() : 0); // argv
instructions().append(argv.size()); // argc
+ instructions().append(newArrayAllocationProfile());
return dst;
}
-RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FunctionBodyNode* function)
+RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, CaptureMode captureMode, FunctionBodyNode* function)
{
- return emitNewFunctionInternal(dst, m_codeBlock->addFunctionDecl(makeFunction(m_globalData, function)), false);
+ return emitNewFunctionInternal(dst, captureMode, m_codeBlock->addFunctionDecl(makeFunction(function)), false);
}
RegisterID* BytecodeGenerator::emitLazyNewFunction(RegisterID* dst, FunctionBodyNode* function)
{
- std::pair<FunctionOffsetMap::iterator, bool> ptr = m_functionOffsets.add(function, 0);
- if (ptr.second)
- ptr.first->second = m_codeBlock->addFunctionDecl(makeFunction(m_globalData, function));
- return emitNewFunctionInternal(dst, ptr.first->second, true);
+ FunctionOffsetMap::AddResult ptr = m_functionOffsets.add(function, 0);
+ if (ptr.isNewEntry)
+ ptr.iterator->value = m_codeBlock->addFunctionDecl(makeFunction(function));
+ return emitNewFunctionInternal(dst, NotCaptured, ptr.iterator->value, true);
}
-RegisterID* BytecodeGenerator::emitNewFunctionInternal(RegisterID* dst, unsigned index, bool doNullCheck)
+RegisterID* BytecodeGenerator::emitNewFunctionInternal(RegisterID* dst, CaptureMode captureMode, unsigned index, bool doNullCheck)
{
createActivationIfNecessary();
- emitOpcode(op_new_func);
+ emitOpcode(captureMode == IsCaptured ? op_new_captured_func : op_new_func);
instructions().append(dst->index());
instructions().append(index);
- instructions().append(doNullCheck);
+ if (captureMode == IsCaptured) {
+ ASSERT(!doNullCheck);
+ instructions().append(watchableVariable(dst->index()));
+ } else
+ instructions().append(doNullCheck);
return dst;
}
RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* r0, FuncExprNode* n)
{
FunctionBodyNode* function = n->body();
- unsigned index = m_codeBlock->addFunctionExpr(makeFunction(m_globalData, function));
+ unsigned index = m_codeBlock->addFunctionExpr(makeFunction(function));
createActivationIfNecessary();
emitOpcode(op_new_func_exp);
return r0;
}
-RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
- return emitCall(op_call, dst, func, callArguments, divot, startOffset, endOffset);
+ return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd);
}
void BytecodeGenerator::createArgumentsIfNecessary()
if (!m_codeBlock->usesArguments())
return;
- // If we're in strict mode we tear off the arguments on function
- // entry, so there's no need to check if we need to create them
- // now
- if (m_codeBlock->isStrictMode())
+ if (shouldTearOffArgumentsEagerly())
return;
emitOpcode(op_create_arguments);
- instructions().append(m_codeBlock->argumentsRegister());
+ instructions().append(m_codeBlock->argumentsRegister().offset());
+ ASSERT(!hasWatchableVariable(m_codeBlock->argumentsRegister().offset()));
}
void BytecodeGenerator::createActivationIfNecessary()
{
- if (m_hasCreatedActivation)
- return;
- if (!m_codeBlock->needsFullScopeChain())
+ if (!m_activationRegister)
return;
emitOpcode(op_create_activation);
instructions().append(m_activationRegister->index());
}
-RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+ createActivationIfNecessary();
+ return emitCall(op_call_eval, dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd);
+}
+
+ExpectedFunction BytecodeGenerator::expectedFunctionForIdentifier(const Identifier& identifier)
+{
+ if (identifier == m_vm->propertyNames->Object)
+ return ExpectObjectConstructor;
+ if (identifier == m_vm->propertyNames->Array)
+ return ExpectArrayConstructor;
+ return NoExpectedFunction;
+}
+
+ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, Label* done)
{
- return emitCall(op_call_eval, dst, func, callArguments, divot, startOffset, endOffset);
+ RefPtr<Label> realCall = newLabel();
+ switch (expectedFunction) {
+ case ExpectObjectConstructor: {
+ // If the number of arguments is non-zero, then we can't do anything interesting.
+ if (callArguments.argumentCountIncludingThis() >= 2)
+ return NoExpectedFunction;
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jneq_ptr);
+ instructions().append(func->index());
+ instructions().append(Special::ObjectConstructor);
+ instructions().append(realCall->bind(begin, instructions().size()));
+
+ if (dst != ignoredResult())
+ emitNewObject(dst);
+ break;
+ }
+
+ case ExpectArrayConstructor: {
+ // If you're doing anything other than "new Array()" or "new Array(foo)" then we
+ // don't inline it, for now. The only reason is that call arguments are in
+ // the opposite order of what op_new_array expects, so we'd either need to change
+ // how op_new_array works or we'd need an op_new_array_reverse. Neither of these
+ // things sounds like it's worth it.
+ if (callArguments.argumentCountIncludingThis() > 2)
+ return NoExpectedFunction;
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jneq_ptr);
+ instructions().append(func->index());
+ instructions().append(Special::ArrayConstructor);
+ instructions().append(realCall->bind(begin, instructions().size()));
+
+ if (dst != ignoredResult()) {
+ if (callArguments.argumentCountIncludingThis() == 2) {
+ emitOpcode(op_new_array_with_size);
+ instructions().append(dst->index());
+ instructions().append(callArguments.argumentRegister(0)->index());
+ instructions().append(newArrayAllocationProfile());
+ } else {
+ ASSERT(callArguments.argumentCountIncludingThis() == 1);
+ emitOpcode(op_new_array);
+ instructions().append(dst->index());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(newArrayAllocationProfile());
+ }
+ }
+ break;
+ }
+
+ default:
+ ASSERT(expectedFunction == NoExpectedFunction);
+ return NoExpectedFunction;
+ }
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jmp);
+ instructions().append(done->bind(begin, instructions().size()));
+ emitLabel(realCall.get());
+
+ return expectedFunction;
}
-RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
ASSERT(func->refCount());
emitMove(callArguments.profileHookRegister(), func);
// Generate code for arguments.
- unsigned argumentIndex = 0;
- for (ArgumentListNode* n = callArguments.argumentsNode()->m_listNode; n; n = n->m_next)
- emitNode(callArguments.argumentRegister(argumentIndex++), n);
-
+ unsigned argument = 0;
+ if (callArguments.argumentsNode()) {
+ ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
+ if (n && n->m_expr->isSpreadExpression()) {
+ RELEASE_ASSERT(!n->m_next);
+ auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
+ RefPtr<RegisterID> argumentRegister;
+ if (expression->isResolveNode() && willResolveToArguments(static_cast<ResolveNode*>(expression)->identifier()) && !symbolTable().slowArguments())
+ argumentRegister = uncheckedRegisterForArguments();
+ else
+ argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
+ RefPtr<RegisterID> thisRegister = emitMove(newTemporary(), callArguments.thisRegister());
+ return emitCallVarargs(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+ }
+ for (; n; n = n->m_next)
+ emitNode(callArguments.argumentRegister(argument++), n);
+ }
+
// Reserve space for call frame.
- Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
- for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+ Vector<RefPtr<RegisterID>, JSStack::CallFrameHeaderSize, UnsafeVectorOverflow> callFrame;
+ for (int i = 0; i < JSStack::CallFrameHeaderSize; ++i)
callFrame.append(newTemporary());
if (m_shouldEmitProfileHooks) {
instructions().append(callArguments.profileHookRegister()->index());
}
- emitExpressionInfo(divot, startOffset, endOffset);
-
-#if ENABLE(JIT)
- m_codeBlock->addCallLinkInfo();
-#endif
+ emitExpressionInfo(divot, divotStart, divotEnd);
+ RefPtr<Label> done = newLabel();
+ expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get());
+
// Emit call.
- emitOpcode(opcodeID);
- instructions().append(func->index()); // func
- instructions().append(callArguments.count()); // argCount
- instructions().append(callArguments.callFrame()); // registerOffset
- if (dst != ignoredResult()) {
- emitOpcode(op_call_put_result);
- instructions().append(dst->index()); // dst
- }
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID);
+ ASSERT(dst);
+ ASSERT(dst != ignoredResult());
+ instructions().append(dst->index());
+ instructions().append(func->index());
+ instructions().append(callArguments.argumentCountIncludingThis());
+ instructions().append(callArguments.stackOffset());
+ instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+ instructions().append(0);
+ instructions().append(arrayProfile);
+ instructions().append(profile);
+
+ if (expectedFunction != NoExpectedFunction)
+ emitLabel(done.get());
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
return dst;
}
-RegisterID* BytecodeGenerator::emitLoadVarargs(RegisterID* argCountDst, RegisterID* thisRegister, RegisterID* arguments)
+RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
- ASSERT(argCountDst->index() < arguments->index());
- emitOpcode(op_load_varargs);
- instructions().append(argCountDst->index());
- instructions().append(arguments->index());
- instructions().append(thisRegister->index() + RegisterFile::CallFrameHeaderSize); // initial registerOffset
- return argCountDst;
+ return emitCallVarargs(op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
}
-RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* argCountRegister, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
+{
+ return emitCallVarargs(op_construct_varargs, dst, func, 0, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
+}
+
+RegisterID* BytecodeGenerator::emitCallVarargs(OpcodeID opcode, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
- ASSERT(func->refCount());
- ASSERT(thisRegister->refCount());
- ASSERT(dst != func);
if (m_shouldEmitProfileHooks) {
+ emitMove(profileHookRegister, func);
emitOpcode(op_profile_will_call);
- instructions().append(func->index());
+ instructions().append(profileHookRegister->index());
}
- emitExpressionInfo(divot, startOffset, endOffset);
-
+ emitExpressionInfo(divot, divotStart, divotEnd);
+
// Emit call.
- emitOpcode(op_call_varargs);
- instructions().append(func->index()); // func
- instructions().append(argCountRegister->index()); // arg count
- instructions().append(thisRegister->index() + RegisterFile::CallFrameHeaderSize); // initial registerOffset
- if (dst != ignoredResult()) {
- emitOpcode(op_call_put_result);
- instructions().append(dst->index()); // dst
- }
+ UnlinkedArrayProfile arrayProfile = newArrayProfile();
+ UnlinkedValueProfile profile = emitProfiledOpcode(opcode);
+ ASSERT(dst != ignoredResult());
+ instructions().append(dst->index());
+ instructions().append(func->index());
+ instructions().append(thisRegister ? thisRegister->index() : 0);
+ instructions().append(arguments->index());
+ instructions().append(firstFreeRegister->index());
+ instructions().append(firstVarArgOffset);
+ instructions().append(arrayProfile);
+ instructions().append(profile);
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
- instructions().append(func->index());
+ instructions().append(profileHookRegister->index());
}
return dst;
}
RegisterID* BytecodeGenerator::emitReturn(RegisterID* src)
{
- if (m_codeBlock->needsFullScopeChain()) {
+ if (m_activationRegister) {
emitOpcode(op_tear_off_activation);
instructions().append(m_activationRegister->index());
- instructions().append(m_codeBlock->argumentsRegister());
- } else if (m_codeBlock->usesArguments() && m_codeBlock->m_numParameters > 1
- && !m_codeBlock->isStrictMode()) { // If there are no named parameters, there's nothing to tear off, since extra / unnamed parameters get copied to the arguments object at construct time.
+ }
+
+ if (m_codeBlock->usesArguments() && m_codeBlock->numParameters() != 1 && !isStrictMode()) {
emitOpcode(op_tear_off_arguments);
- instructions().append(m_codeBlock->argumentsRegister());
+ instructions().append(m_codeBlock->argumentsRegister().offset());
+ instructions().append(m_activationRegister ? m_activationRegister->index() : emitLoad(0, JSValue())->index());
}
// Constructors use op_ret_object_or_this to check the result is an
return src;
}
-RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
{
ASSERT(func->refCount());
emitMove(callArguments.profileHookRegister(), func);
// Generate code for arguments.
- unsigned argumentIndex = 0;
+ unsigned argument = 0;
if (ArgumentsNode* argumentsNode = callArguments.argumentsNode()) {
+
+ ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
+ if (n && n->m_expr->isSpreadExpression()) {
+ RELEASE_ASSERT(!n->m_next);
+ auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
+ RefPtr<RegisterID> argumentRegister;
+ if (expression->isResolveNode() && willResolveToArguments(static_cast<ResolveNode*>(expression)->identifier()) && !symbolTable().slowArguments())
+ argumentRegister = uncheckedRegisterForArguments();
+ else
+ argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
+ return emitConstructVarargs(dst, func, argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+ }
+
for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next)
- emitNode(callArguments.argumentRegister(argumentIndex++), n);
+ emitNode(callArguments.argumentRegister(argument++), n);
}
if (m_shouldEmitProfileHooks) {
}
// Reserve space for call frame.
- Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
- for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+ Vector<RefPtr<RegisterID>, JSStack::CallFrameHeaderSize, UnsafeVectorOverflow> callFrame;
+ for (int i = 0; i < JSStack::CallFrameHeaderSize; ++i)
callFrame.append(newTemporary());
- emitExpressionInfo(divot, startOffset, endOffset);
+ emitExpressionInfo(divot, divotStart, divotEnd);
+
+ RefPtr<Label> done = newLabel();
+ expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get());
-#if ENABLE(JIT)
- m_codeBlock->addCallLinkInfo();
-#endif
+ UnlinkedValueProfile profile = emitProfiledOpcode(op_construct);
+ ASSERT(dst != ignoredResult());
+ instructions().append(dst->index());
+ instructions().append(func->index());
+ instructions().append(callArguments.argumentCountIncludingThis());
+ instructions().append(callArguments.stackOffset());
+ instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(profile);
- emitOpcode(op_construct);
- instructions().append(func->index()); // func
- instructions().append(callArguments.count()); // argCount
- instructions().append(callArguments.callFrame()); // registerOffset
- if (dst != ignoredResult()) {
- emitOpcode(op_call_put_result);
- instructions().append(dst->index()); // dst
- }
+ if (expectedFunction != NoExpectedFunction)
+ emitLabel(done.get());
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
instructions().append(src->index());
}
-RegisterID* BytecodeGenerator::emitPushScope(RegisterID* scope)
+RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* scope)
{
- ASSERT(scope->isTemporary());
ControlFlowContext context;
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
- m_dynamicScopeDepth++;
+ m_localScopeDepth++;
- return emitUnaryNoDstOp(op_push_scope, scope);
+ createActivationIfNecessary();
+ return emitUnaryNoDstOp(op_push_with_scope, scope);
}
void BytecodeGenerator::emitPopScope()
emitOpcode(op_pop_scope);
m_scopeContextStack.removeLast();
- m_dynamicScopeDepth--;
+ m_localScopeDepth--;
}
-void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, int firstLine, int lastLine)
+void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, unsigned line, unsigned charOffset, unsigned lineStart)
{
#if ENABLE(DEBUG_WITH_BREAKPOINT)
if (debugHookID != DidReachBreakpoint)
if (!m_shouldEmitDebugHooks)
return;
#endif
+ JSTextPosition divot(line, charOffset, lineStart);
+ emitExpressionInfo(divot, divot, divot);
emitOpcode(op_debug);
instructions().append(debugHookID);
- instructions().append(firstLine);
- instructions().append(lastLine);
+ instructions().append(false);
}
-void BytecodeGenerator::pushFinallyContext(Label* target, RegisterID* retAddrDst)
+void BytecodeGenerator::pushFinallyContext(StatementNode* finallyBlock)
{
+ // Reclaim free label scopes.
+ while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+ m_labelScopes.removeLast();
+
ControlFlowContext scope;
scope.isFinallyBlock = true;
- FinallyContext context = { target, retAddrDst };
+ FinallyContext context = {
+ finallyBlock,
+ static_cast<unsigned>(m_scopeContextStack.size()),
+ static_cast<unsigned>(m_switchContextStack.size()),
+ static_cast<unsigned>(m_forInContextStack.size()),
+ static_cast<unsigned>(m_tryContextStack.size()),
+ static_cast<unsigned>(m_labelScopes.size()),
+ m_finallyDepth,
+ m_localScopeDepth
+ };
scope.finallyContext = context;
m_scopeContextStack.append(scope);
m_finallyDepth++;
m_finallyDepth--;
}
-LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
+LabelScopePtr BytecodeGenerator::breakTarget(const Identifier& name)
{
// Reclaim free label scopes.
//
}
if (!m_labelScopes.size())
- return 0;
+ return LabelScopePtr::null();
// We special-case the following, which is a syntax error in Firefox:
// label:
LabelScope* scope = &m_labelScopes[i];
if (scope->type() != LabelScope::NamedLabel) {
ASSERT(scope->breakTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->name() && *scope->name() == name) {
ASSERT(scope->breakTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
-LabelScope* BytecodeGenerator::continueTarget(const Identifier& name)
+LabelScopePtr BytecodeGenerator::continueTarget(const Identifier& name)
{
// Reclaim free label scopes.
while (m_labelScopes.size() && !m_labelScopes.last().refCount())
m_labelScopes.removeLast();
if (!m_labelScopes.size())
- return 0;
+ return LabelScopePtr::null();
if (name.isEmpty()) {
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() == LabelScope::Loop) {
ASSERT(scope->continueTarget());
- return scope;
+ return LabelScopePtr(m_labelScopes, i);
}
}
- return 0;
+ return LabelScopePtr::null();
}
// Continue to the loop nested nearest to the label scope that matches
// 'name'.
- LabelScope* result = 0;
+ LabelScopePtr result = LabelScopePtr::null();
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() == LabelScope::Loop) {
ASSERT(scope->continueTarget());
- result = scope;
+ result = LabelScopePtr(m_labelScopes, i);
}
if (scope->name() && *scope->name() == name)
- return result; // may be 0
+ return result; // may be null.
}
- return 0;
+ return LabelScopePtr::null();
}
-PassRefPtr<Label> BytecodeGenerator::emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope)
+void BytecodeGenerator::emitComplexPopScopes(ControlFlowContext* topScope, ControlFlowContext* bottomScope)
{
while (topScope > bottomScope) {
// First we count the number of dynamic scopes we need to remove to get
}
if (nNormalScopes) {
- size_t begin = instructions().size();
-
// We need to remove a number of dynamic scopes to get to the next
// finally block
- emitOpcode(op_jmp_scopes);
- instructions().append(nNormalScopes);
-
- // If topScope == bottomScope then there isn't actually a finally block
- // left to emit, so make the jmp_scopes jump directly to the target label
- if (topScope == bottomScope) {
- instructions().append(target->bind(begin, instructions().size()));
- return target;
- }
+ while (nNormalScopes--)
+ emitOpcode(op_pop_scope);
- // Otherwise we just use jmp_scopes to pop a group of scopes and go
- // to the next instruction
- RefPtr<Label> nextInsn = newLabel();
- instructions().append(nextInsn->bind(begin, instructions().size()));
- emitLabel(nextInsn.get());
+ // If topScope == bottomScope then there isn't a finally block left to emit.
+ if (topScope == bottomScope)
+ return;
}
-
+
+ Vector<ControlFlowContext> savedScopeContextStack;
+ Vector<SwitchInfo> savedSwitchContextStack;
+ Vector<ForInContext> savedForInContextStack;
+ Vector<TryContext> poppedTryContexts;
+ LabelScopeStore savedLabelScopes;
while (topScope > bottomScope && topScope->isFinallyBlock) {
- emitJumpSubroutine(topScope->finallyContext.retAddrDst, topScope->finallyContext.finallyAddr);
+ RefPtr<Label> beforeFinally = emitLabel(newLabel().get());
+
+ // Save the current state of the world while instating the state of the world
+ // for the finally block.
+ FinallyContext finallyContext = topScope->finallyContext;
+ bool flipScopes = finallyContext.scopeContextStackSize != m_scopeContextStack.size();
+ bool flipSwitches = finallyContext.switchContextStackSize != m_switchContextStack.size();
+ bool flipForIns = finallyContext.forInContextStackSize != m_forInContextStack.size();
+ bool flipTries = finallyContext.tryContextStackSize != m_tryContextStack.size();
+ bool flipLabelScopes = finallyContext.labelScopesSize != m_labelScopes.size();
+ int topScopeIndex = -1;
+ int bottomScopeIndex = -1;
+ if (flipScopes) {
+ topScopeIndex = topScope - m_scopeContextStack.begin();
+ bottomScopeIndex = bottomScope - m_scopeContextStack.begin();
+ savedScopeContextStack = m_scopeContextStack;
+ m_scopeContextStack.shrink(finallyContext.scopeContextStackSize);
+ }
+ if (flipSwitches) {
+ savedSwitchContextStack = m_switchContextStack;
+ m_switchContextStack.shrink(finallyContext.switchContextStackSize);
+ }
+ if (flipForIns) {
+ savedForInContextStack = m_forInContextStack;
+ m_forInContextStack.shrink(finallyContext.forInContextStackSize);
+ }
+ if (flipTries) {
+ while (m_tryContextStack.size() != finallyContext.tryContextStackSize) {
+ ASSERT(m_tryContextStack.size() > finallyContext.tryContextStackSize);
+ TryContext context = m_tryContextStack.last();
+ m_tryContextStack.removeLast();
+ TryRange range;
+ range.start = context.start;
+ range.end = beforeFinally;
+ range.tryData = context.tryData;
+ m_tryRanges.append(range);
+ poppedTryContexts.append(context);
+ }
+ }
+ if (flipLabelScopes) {
+ savedLabelScopes = m_labelScopes;
+ while (m_labelScopes.size() > finallyContext.labelScopesSize)
+ m_labelScopes.removeLast();
+ }
+ int savedFinallyDepth = m_finallyDepth;
+ m_finallyDepth = finallyContext.finallyDepth;
+ int savedDynamicScopeDepth = m_localScopeDepth;
+ m_localScopeDepth = finallyContext.dynamicScopeDepth;
+
+ // Emit the finally block.
+ emitNode(finallyContext.finallyBlock);
+
+ RefPtr<Label> afterFinally = emitLabel(newLabel().get());
+
+ // Restore the state of the world.
+ if (flipScopes) {
+ m_scopeContextStack = savedScopeContextStack;
+ topScope = &m_scopeContextStack[topScopeIndex]; // assert it's within bounds
+ bottomScope = m_scopeContextStack.begin() + bottomScopeIndex; // don't assert, since the index might be -1.
+ }
+ if (flipSwitches)
+ m_switchContextStack = savedSwitchContextStack;
+ if (flipForIns)
+ m_forInContextStack = savedForInContextStack;
+ if (flipTries) {
+ ASSERT(m_tryContextStack.size() == finallyContext.tryContextStackSize);
+ for (unsigned i = poppedTryContexts.size(); i--;) {
+ TryContext context = poppedTryContexts[i];
+ context.start = afterFinally;
+ m_tryContextStack.append(context);
+ }
+ poppedTryContexts.clear();
+ }
+ if (flipLabelScopes)
+ m_labelScopes = savedLabelScopes;
+ m_finallyDepth = savedFinallyDepth;
+ m_localScopeDepth = savedDynamicScopeDepth;
+
--topScope;
}
}
- return emitJump(target);
}
-PassRefPtr<Label> BytecodeGenerator::emitJumpScopes(Label* target, int targetScopeDepth)
+void BytecodeGenerator::emitPopScopes(int targetScopeDepth)
{
ASSERT(scopeDepth() - targetScopeDepth >= 0);
- ASSERT(target->isForward());
size_t scopeDelta = scopeDepth() - targetScopeDepth;
ASSERT(scopeDelta <= m_scopeContextStack.size());
if (!scopeDelta)
- return emitJump(target);
-
- if (m_finallyDepth)
- return emitComplexJumpScopes(target, &m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
+ return;
- size_t begin = instructions().size();
+ if (!m_finallyDepth) {
+ while (scopeDelta--)
+ emitOpcode(op_pop_scope);
+ return;
+ }
- emitOpcode(op_jmp_scopes);
- instructions().append(scopeDelta);
- instructions().append(target->bind(begin, instructions().size()));
- return target;
+ emitComplexPopScopes(&m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
}
RegisterID* BytecodeGenerator::emitGetPropertyNames(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, Label* breakTarget)
return dst;
}
-RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* start, Label* end)
+TryData* BytecodeGenerator::pushTry(Label* start)
+{
+ TryData tryData;
+ tryData.target = newLabel();
+ tryData.targetScopeDepth = UINT_MAX;
+ m_tryData.append(tryData);
+ TryData* result = &m_tryData.last();
+
+ TryContext tryContext;
+ tryContext.start = start;
+ tryContext.tryData = result;
+
+ m_tryContextStack.append(tryContext);
+
+ return result;
+}
+
+RegisterID* BytecodeGenerator::popTryAndEmitCatch(TryData* tryData, RegisterID* targetRegister, Label* end)
{
m_usesExceptions = true;
-#if ENABLE(JIT)
- HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel() };
-#else
- HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth };
-#endif
+
+ ASSERT_UNUSED(tryData, m_tryContextStack.last().tryData == tryData);
+
+ TryRange tryRange;
+ tryRange.start = m_tryContextStack.last().start;
+ tryRange.end = end;
+ tryRange.tryData = m_tryContextStack.last().tryData;
+ m_tryRanges.append(tryRange);
+ m_tryContextStack.removeLast();
+
+ emitLabel(tryRange.tryData->target.get());
+ tryRange.tryData->targetScopeDepth = m_localScopeDepth;
- m_codeBlock->addExceptionHandler(info);
emitOpcode(op_catch);
instructions().append(targetRegister->index());
return targetRegister;
}
-void BytecodeGenerator::emitThrowReferenceError(const UString& message)
+void BytecodeGenerator::emitThrowReferenceError(const String& message)
{
- emitOpcode(op_throw_reference_error);
- instructions().append(addConstantValue(jsString(globalData(), message))->index());
+ emitOpcode(op_throw_static_error);
+ instructions().append(addConstantValue(addStringConstant(Identifier(m_vm, message)))->index());
+ instructions().append(true);
}
-PassRefPtr<Label> BytecodeGenerator::emitJumpSubroutine(RegisterID* retAddrDst, Label* finally)
+void BytecodeGenerator::emitPushFunctionNameScope(const Identifier& property, RegisterID* value, unsigned attributes)
{
- size_t begin = instructions().size();
-
- emitOpcode(op_jsr);
- instructions().append(retAddrDst->index());
- instructions().append(finally->bind(begin, instructions().size()));
- emitLabel(newLabel().get()); // Record the fact that the next instruction is implicitly labeled, because op_sret will return to it.
- return finally;
+ emitOpcode(op_push_name_scope);
+ instructions().append(addConstant(property));
+ instructions().append(value->index());
+ instructions().append(attributes);
}
-void BytecodeGenerator::emitSubroutineReturn(RegisterID* retAddrSrc)
+void BytecodeGenerator::emitPushCatchScope(const Identifier& property, RegisterID* value, unsigned attributes)
{
- emitOpcode(op_sret);
- instructions().append(retAddrSrc->index());
-}
+ createActivationIfNecessary();
-void BytecodeGenerator::emitPushNewScope(RegisterID* dst, const Identifier& property, RegisterID* value)
-{
ControlFlowContext context;
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
- m_dynamicScopeDepth++;
+ m_localScopeDepth++;
- emitOpcode(op_push_new_scope);
- instructions().append(dst->index());
+ emitOpcode(op_push_name_scope);
instructions().append(addConstant(property));
instructions().append(value->index());
+ instructions().append(attributes);
}
void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::SwitchType type)
{
+ // instructions().size() is size_t; SwitchInfo's offset field is 32-bit,
+ // so narrow explicitly rather than rely on an implicit conversion.
- SwitchInfo info = { instructions().size(), type };
+ SwitchInfo info = { static_cast<uint32_t>(instructions().size()), type };
switch (type) {
case SwitchInfo::SwitchImmediate:
emitOpcode(op_switch_imm);
emitOpcode(op_switch_string);
break;
default:
+ // An unknown switch type is a generator bug — crash even in release builds.
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
instructions().append(0); // place holder for table index
return key - min;
}
-static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
-{
- jumpTable.min = min;
- jumpTable.branchOffsets.resize(max - min + 1);
- jumpTable.branchOffsets.fill(0);
- for (uint32_t i = 0; i < clauseCount; ++i) {
- // We're emitting this after the clause labels should have been fixed, so
- // the labels should not be "forward" references
- ASSERT(!labels[i]->isForward());
- jumpTable.add(keyForImmediateSwitch(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
- }
-}
-
+// NOTE: the per-type jump-table builders (immediate/character) were folded
+// into the shared prepareJumpTableForSwitch below, parameterized by a
+// key-getter function; only the key computations remain as free functions.
static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t max)
{
UNUSED_PARAM(max);
StringImpl* clause = static_cast<StringNode*>(node)->value().impl();
ASSERT(clause->length() == 1);
+ // operator[] on StringImpl reads one character whether the backing store
+ // is 8-bit or 16-bit; the removed characters() call presumably forced the
+ // 16-bit representation — confirm against WTF::StringImpl.
- int32_t key = clause->characters()[0];
+ int32_t key = (*clause)[0];
ASSERT(key >= min);
ASSERT(key <= max);
return key - min;
}
-static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
+// Unified replacement for the former immediate/character jump-table builders.
+// Fills `jumpTable` with one branch offset per clause in [min, max], using
+// `keyGetter` to map each clause's ExpressionNode to its integer key.
+static void prepareJumpTableForSwitch(
+ UnlinkedSimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount,
+ RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max,
+ int32_t (*keyGetter)(ExpressionNode*, int32_t min, int32_t max))
{
jumpTable.min = min;
jumpTable.branchOffsets.resize(max - min + 1);
// We're emitting this after the clause labels should have been fixed, so
// the labels should not be "forward" references
ASSERT(!labels[i]->isForward());
+ // Offsets are bound relative to the switch opcode (+3 = opcode plus its
+ // two operands already appended).
- jumpTable.add(keyForCharacterSwitch(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
+ jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
}
}
-static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
+// Fills the string jump table: maps each string clause's StringImpl to the
+// branch offset of its label. Now targets the Unlinked variant of the table.
+static void prepareJumpTableForStringSwitch(UnlinkedStringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
{
for (uint32_t i = 0; i < clauseCount; ++i) {
// We're emitting this after the clause labels should have been fixed, so
ASSERT(nodes[i]->isString());
StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl();
- OffsetLocation location;
- location.branchOffset = labels[i]->bind(switchAddress, switchAddress + 3);
- jumpTable.offsetTable.add(clause, location);
+ // The unlinked table stores the raw offset directly; the OffsetLocation
+ // wrapper struct is gone.
+ jumpTable.offsetTable.add(clause, labels[i]->bind(switchAddress, switchAddress + 3));
}
}
{
SwitchInfo switchInfo = m_switchContextStack.last();
m_switchContextStack.removeLast();
- if (switchInfo.switchType == SwitchInfo::SwitchImmediate) {
- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfImmediateSwitchJumpTables();
+
+ // Immediate and character switches now share one unlinked jump-table kind;
+ // they differ only in how a clause expression becomes an integer key.
+ switch (switchInfo.switchType) {
+ case SwitchInfo::SwitchImmediate:
+ case SwitchInfo::SwitchCharacter: {
+ // Patch the placeholder operands written by beginSwitch: [+1] = table
+ // index, [+2] = default-label offset.
+ instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfSwitchJumpTables();
instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
- SimpleJumpTable& jumpTable = m_codeBlock->addImmediateSwitchJumpTable();
- prepareJumpTableForImmediateSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max);
- } else if (switchInfo.switchType == SwitchInfo::SwitchCharacter) {
- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfCharacterSwitchJumpTables();
- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
+ UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->addSwitchJumpTable();
+ prepareJumpTableForSwitch(
+ jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max,
+ switchInfo.switchType == SwitchInfo::SwitchImmediate
+ ? keyForImmediateSwitch
+ : keyForCharacterSwitch);
+ break;
+ }
- SimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable();
- prepareJumpTableForCharacterSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max);
- } else {
- ASSERT(switchInfo.switchType == SwitchInfo::SwitchString);
+ case SwitchInfo::SwitchString: {
instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables();
instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);
- StringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
+ UnlinkedStringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes);
+ break;
+ }
+
+ default:
+ // The debug-only ASSERT becomes a release assert: an unknown type here
+ // means beginSwitch/endSwitch disagree — fail loudly.
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
}
m_codeBlock->setIsNumericCompareFunction(isNumericCompareFunction);
}
-int BytecodeGenerator::argumentNumberFor(const Identifier& ident)
+// Returns true if `ident` resolves to the local register that holds formal
+// argument number `argumentNumber` (as laid out by CallFrame::argumentOffset).
+// Replaces argumentNumberFor, which returned the index itself (0 = "not an
+// argument").
+bool BytecodeGenerator::isArgumentNumber(const Identifier& ident, int argumentNumber)
{
- int parameterCount = m_parameters.size(); // includes 'this'
- RegisterID* registerID = registerFor(ident);
- if (!registerID)
- return 0;
- int index = registerID->index() + RegisterFile::CallFrameHeaderSize + parameterCount;
- return (index > 0 && index < parameterCount) ? index : 0;
+ RegisterID* registerID = local(ident).get();
+ // A missing register, or one with a non-negative index, cannot be an
+ // argument slot (arguments occupy negative offsets relative to the frame —
+ // assumption; confirm against CallFrame::argumentOffset).
+ if (!registerID || registerID->index() >= 0)
+ return false;
+ return registerID->index() == CallFrame::argumentOffset(argumentNumber);
+}
+
+// Emits a runtime throw for a write to a read-only binding — but only in
+// strict mode; sloppy-mode writes to read-only properties fail silently,
+// so no bytecode is emitted in that case.
+void BytecodeGenerator::emitReadOnlyExceptionIfNeeded()
+{
+ if (!isStrictMode())
+ return;
+ emitOpcode(op_throw_static_error);
+ instructions().append(addConstantValue(addStringConstant(Identifier(m_vm, StrictModeReadonlyPropertyWriteError)))->index());
+ // Flag operand is false here (vs. true in emitThrowReferenceError) —
+ // presumably selecting TypeError; confirm against the opcode handler.
+ instructions().append(false);
+}
+
+// Emits bytecode that enumerates the values produced by `subjectNode`,
+// invoking `callBack` once per element so the caller can emit the loop body.
+// `node` supplies divot positions for exception reporting on the calls.
+void BytecodeGenerator::emitEnumeration(ThrowableExpressionData* node, ExpressionNode* subjectNode, const std::function<void(BytecodeGenerator&, RegisterID*)>& callBack)
+{
+ // Fast path: iterating the function's own unmodified `arguments` object
+ // (and no slow-arguments shadowing) — index directly with
+ // op_get_argument_by_val instead of going through the iterator protocol.
+ if (subjectNode->isResolveNode()
+ && willResolveToArguments(static_cast<ResolveNode*>(subjectNode)->identifier())
+ && !symbolTable().slowArguments()) {
+ RefPtr<RegisterID> index = emitLoad(newTemporary(), jsNumber(0));
+
+ LabelScopePtr scope = newLabelScope(LabelScope::Loop);
+ RefPtr<RegisterID> value = emitLoad(newTemporary(), jsUndefined());
+
+ // while (index < arguments.length) { value = arguments[index]; body; ++index; }
+ RefPtr<Label> loopCondition = newLabel();
+ RefPtr<Label> loopStart = newLabel();
+ emitJump(loopCondition.get());
+ emitLabel(loopStart.get());
+ emitLoopHint();
+ emitGetArgumentByVal(value.get(), uncheckedRegisterForArguments(), index.get());
+ callBack(*this, value.get());
+
+ emitLabel(scope->continueTarget());
+ emitInc(index.get());
+ emitLabel(loopCondition.get());
+ // Length is re-read each iteration; presumably arguments.length can
+ // change during the loop — confirm intent.
+ RefPtr<RegisterID> length = emitGetArgumentsLength(newTemporary(), uncheckedRegisterForArguments());
+ emitJumpIfTrue(emitEqualityOp(op_less, newTemporary(), index.get(), length.get()), loopStart.get());
+ emitLabel(scope->breakTarget());
+ return;
+ }
+
+ // General path: fetch the subject's private @iterator method, call it to
+ // obtain the iterator, then repeatedly call its private @iteratorNext.
+ LabelScopePtr scope = newLabelScope(LabelScope::Loop);
+ RefPtr<RegisterID> subject = newTemporary();
+ emitNode(subject.get(), subjectNode);
+ RefPtr<RegisterID> iterator = emitGetById(newTemporary(), subject.get(), propertyNames().iteratorPrivateName);
+ {
+ CallArguments args(*this, 0);
+ emitMove(args.thisRegister(), subject.get());
+ emitCall(iterator.get(), iterator.get(), NoExpectedFunction, args, node->divot(), node->divotStart(), node->divotEnd());
+ }
+ RefPtr<RegisterID> iteratorNext = emitGetById(newTemporary(), iterator.get(), propertyNames().iteratorNextPrivateName);
+ RefPtr<RegisterID> value = newTemporary();
+ emitLoad(value.get(), jsUndefined());
+
+ emitJump(scope->continueTarget());
+
+ RefPtr<Label> loopStart = newLabel();
+ emitLabel(loopStart.get());
+ emitLoopHint();
+ callBack(*this, value.get());
+ emitLabel(scope->continueTarget());
+ // @iteratorNext is called with the iterator as `this` and the previous
+ // value as its single argument; its result becomes the next value.
+ CallArguments nextArguments(*this, 0, 1);
+ emitMove(nextArguments.thisRegister(), iterator.get());
+ emitMove(nextArguments.argumentRegister(0), value.get());
+ emitCall(value.get(), iteratorNext.get(), NoExpectedFunction, nextArguments, node->divot(), node->divotStart(), node->divotEnd());
+ RefPtr<RegisterID> result = newTemporary();
+ // Loop terminates when the result strict-equals the VM-wide
+ // iterationTerminator sentinel (a pre-standard protocol, not the final
+ // ES6 { value, done } shape — NOTE(review): confirm against this VM era).
+ emitJumpIfFalse(emitEqualityOp(op_stricteq, result.get(), value.get(), emitLoad(0, JSValue(vm()->iterationTerminator.get()))), loopStart.get());
+ emitLabel(scope->breakTarget());
}
} // namespace JSC