/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
#include "BytecodeGenerator.h"
#include "BatchedTransitionOptimizer.h"
-#include "PrototypeFunction.h"
#include "JSFunction.h"
#include "Interpreter.h"
+#include "LowLevelInterpreter.h"
+#include "ScopeChain.h"
+#include "StrongInlines.h"
#include "UString.h"
using namespace std;
expected by the callee.
*/
-#ifndef NDEBUG
static bool s_dumpsGeneratedCode = false;
-#endif
+
+void Label::setLocation(unsigned location)
+{
+ m_location = location;
+
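+ // Back-patch jumps recorded before this label was bound: write the now-known relative offset into each pending jump's target operand.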
+ unsigned size = m_unresolvedJumps.size();
+ for (unsigned i = 0; i < size; ++i)
+ m_generator->m_instructions[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first;
+}
void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode)
{
-#ifndef NDEBUG
s_dumpsGeneratedCode = dumpsGeneratedCode;
-#else
- UNUSED_PARAM(dumpsGeneratedCode);
-#endif
}
bool BytecodeGenerator::dumpsGeneratedCode()
{
-#ifndef NDEBUG
return s_dumpsGeneratedCode;
-#else
- return false;
-#endif
}
-void BytecodeGenerator::generate()
+JSObject* BytecodeGenerator::generate()
{
+ SamplingRegion samplingRegion("Bytecode Generation");
+
m_codeBlock->setThisRegister(m_thisRegister.index());
m_scopeNode->emitBytecode(*this);
-
-#ifndef NDEBUG
- m_codeBlock->setInstructionCount(m_codeBlock->instructions().size());
+
+ m_codeBlock->instructions() = RefCountedArray<Instruction>(m_instructions);
if (s_dumpsGeneratedCode)
- m_codeBlock->dump(m_scopeChain->globalObject()->globalExec());
-#endif
+ m_codeBlock->dump(m_scopeChain->globalObject->globalExec());
if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode)
symbolTable().clear();
-
- m_codeBlock->setIsNumericCompareFunction(instructions() == m_globalData->numericCompareFunction(m_scopeChain->globalObject()->globalExec()));
-
-#if !ENABLE(OPCODE_SAMPLING)
- if (!m_regeneratingForExceptionInfo && (m_codeType == FunctionCode || m_codeType == EvalCode))
- m_codeBlock->clearExceptionInfo();
-#endif
m_codeBlock->shrinkToFit();
+
+ if (m_expressionTooDeep)
+ return createOutOfMemoryError(m_scopeChain->globalObject.get());
+ return 0;
}
bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
{
int index = m_calleeRegisters.size();
SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
- pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
+ SymbolTable::AddResult result = symbolTable().add(ident.impl(), newEntry);
- if (!result.second) {
- r0 = &registerFor(result.first->second.getIndex());
+ if (!result.isNewEntry) {
+ r0 = &registerFor(result.iterator->second.getIndex());
return false;
}
- ++m_codeBlock->m_numVars;
- r0 = newRegister();
+ r0 = addVar();
return true;
}
-bool BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
+int BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant)
{
- int index = m_nextGlobalIndex;
+ int index = symbolTable().size();
SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
- pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
-
- if (!result.second)
- index = result.first->second.getIndex();
- else {
- --m_nextGlobalIndex;
- m_globals.append(index + m_globalVarStorageOffset);
- }
-
- r0 = &registerFor(index);
- return result.second;
+ SymbolTable::AddResult result = symbolTable().add(ident.impl(), newEntry);
+ if (!result.isNewEntry)
+ index = result.iterator->second.getIndex();
+ return index;
}
void BytecodeGenerator::preserveLastVar()
m_lastVar = &m_calleeRegisters.last();
}
-BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(!!debugger)
- , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
- , m_scopeChain(&scopeChain)
+BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, ScopeChainNode* scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock, CompilationKind compilationKind)
+ : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
+ , m_shouldEmitProfileHooks(scopeChain->globalObject->globalObjectMethodTable()->supportsProfiling(scopeChain->globalObject.get()))
+ , m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get()))
+ , m_scopeChain(*scopeChain->globalData, scopeChain)
, m_symbolTable(symbolTable)
, m_scopeNode(programNode)
, m_codeBlock(codeBlock)
- , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
+ , m_thisRegister(CallFrame::thisArgumentOffset())
, m_finallyDepth(0)
, m_dynamicScopeDepth(0)
, m_baseScopeDepth(0)
, m_codeType(GlobalCode)
- , m_nextGlobalIndex(-1)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+ , m_hasCreatedActivation(true)
+ , m_firstLazyFunction(0)
+ , m_lastLazyFunction(0)
+ , m_globalData(scopeChain->globalData)
, m_lastOpcodeID(op_end)
- , m_emitNodeDepth(0)
- , m_regeneratingForExceptionInfo(false)
- , m_codeBlockBeingRegeneratedFrom(0)
+#ifndef NDEBUG
+ , m_lastOpcodePosition(0)
+#endif
+ , m_stack(wtfThreadData().stack())
+ , m_usesExceptions(false)
+ , m_expressionTooDeep(false)
{
+ m_globalData->startedCompiling(m_codeBlock);
if (m_shouldEmitDebugHooks)
m_codeBlock->setNeedsFullScopeChain(true);
// FIXME: Move code that modifies the global object to Interpreter::execute.
- m_codeBlock->m_numParameters = 1; // Allocate space for "this"
+ m_codeBlock->setNumParameters(1); // Allocate space for "this"
+ codeBlock->m_numCapturedVars = codeBlock->m_numVars;
+
+ if (compilationKind == OptimizingCompilation)
+ return;
- JSGlobalObject* globalObject = scopeChain.globalObject();
+ JSGlobalObject* globalObject = scopeChain->globalObject.get();
ExecState* exec = globalObject->globalExec();
- RegisterFile* registerFile = &exec->globalData().interpreter->registerFile();
- // Shift register indexes in generated code to elide registers allocated by intermediate stack frames.
- m_globalVarStorageOffset = -RegisterFile::CallFrameHeaderSize - m_codeBlock->m_numParameters - registerFile->size();
-
- // Add previously defined symbols to bookkeeping.
- m_globals.grow(symbolTable->size());
- SymbolTable::iterator end = symbolTable->end();
- for (SymbolTable::iterator it = symbolTable->begin(); it != end; ++it)
- registerFor(it->second.getIndex()).setIndex(it->second.getIndex() + m_globalVarStorageOffset);
-
- BatchedTransitionOptimizer optimizer(globalObject);
+ BatchedTransitionOptimizer optimizer(*m_globalData, globalObject);
const VarStack& varStack = programNode->varStack();
const FunctionStack& functionStack = programNode->functionStack();
- bool canOptimizeNewGlobals = symbolTable->size() + functionStack.size() + varStack.size() < registerFile->maxGlobals();
- if (canOptimizeNewGlobals) {
- // Shift new symbols so they get stored prior to existing symbols.
- m_nextGlobalIndex -= symbolTable->size();
-
- for (size_t i = 0; i < functionStack.size(); ++i) {
- FunctionBodyNode* function = functionStack[i];
- globalObject->removeDirect(function->ident()); // Make sure our new function is not shadowed by an old property.
- emitNewFunction(addGlobalVar(function->ident(), false), function);
- }
- Vector<RegisterID*, 32> newVars;
- for (size_t i = 0; i < varStack.size(); ++i)
- if (!globalObject->hasProperty(exec, *varStack[i].first))
- newVars.append(addGlobalVar(*varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant));
+ size_t newGlobals = varStack.size() + functionStack.size();
+ if (!newGlobals)
+ return;
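+ // Grow the global object's register storage up front so each function and var declared below has a slot to write into.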
+ globalObject->resizeRegisters(symbolTable->size() + newGlobals);
- preserveLastVar();
+ for (size_t i = 0; i < functionStack.size(); ++i) {
+ FunctionBodyNode* function = functionStack[i];
+ globalObject->removeDirect(*m_globalData, function->ident()); // Newly declared functions overwrite existing properties.
- for (size_t i = 0; i < newVars.size(); ++i)
- emitLoad(newVars[i], jsUndefined());
- } else {
- for (size_t i = 0; i < functionStack.size(); ++i) {
- FunctionBodyNode* function = functionStack[i];
- globalObject->putWithAttributes(exec, function->ident(), new (exec) JSFunction(exec, makeFunction(exec, function), scopeChain.node()), DontDelete);
- }
- for (size_t i = 0; i < varStack.size(); ++i) {
- if (globalObject->hasProperty(exec, *varStack[i].first))
- continue;
- int attributes = DontDelete;
- if (varStack[i].second & DeclarationStacks::IsConstant)
- attributes |= ReadOnly;
- globalObject->putWithAttributes(exec, *varStack[i].first, jsUndefined(), attributes);
- }
+ JSValue value = JSFunction::create(exec, makeFunction(exec, function), scopeChain);
+ int index = addGlobalVar(function->ident(), false);
+ globalObject->registerAt(index).set(*m_globalData, globalObject, value);
+ }
- preserveLastVar();
+ for (size_t i = 0; i < varStack.size(); ++i) {
+ if (globalObject->hasProperty(exec, *varStack[i].first))
+ continue;
+ addGlobalVar(*varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant);
}
}
-BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(!!debugger)
- , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
- , m_scopeChain(&scopeChain)
+BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainNode* scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock, CompilationKind)
+ : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
+ , m_shouldEmitProfileHooks(scopeChain->globalObject->globalObjectMethodTable()->supportsProfiling(scopeChain->globalObject.get()))
+ , m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get()))
+ , m_scopeChain(*scopeChain->globalData, scopeChain)
, m_symbolTable(symbolTable)
, m_scopeNode(functionBody)
, m_codeBlock(codeBlock)
+ , m_activationRegister(0)
, m_finallyDepth(0)
, m_dynamicScopeDepth(0)
, m_baseScopeDepth(0)
, m_codeType(FunctionCode)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+ , m_hasCreatedActivation(false)
+ , m_firstLazyFunction(0)
+ , m_lastLazyFunction(0)
+ , m_globalData(scopeChain->globalData)
, m_lastOpcodeID(op_end)
- , m_emitNodeDepth(0)
- , m_regeneratingForExceptionInfo(false)
- , m_codeBlockBeingRegeneratedFrom(0)
+#ifndef NDEBUG
+ , m_lastOpcodePosition(0)
+#endif
+ , m_stack(wtfThreadData().stack())
+ , m_usesExceptions(false)
+ , m_expressionTooDeep(false)
{
+ m_globalData->startedCompiling(m_codeBlock);
if (m_shouldEmitDebugHooks)
m_codeBlock->setNeedsFullScopeChain(true);
codeBlock->setGlobalData(m_globalData);
-
- bool usesArguments = functionBody->usesArguments();
- codeBlock->setUsesArguments(usesArguments);
- if (usesArguments) {
- m_argumentsRegister.setIndex(RegisterFile::OptionalCalleeArguments);
- addVar(propertyNames().arguments, false);
+
+ emitOpcode(op_enter);
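+ // Reserve a register for the activation object; it is initialized lazily and only materialized by op_create_activation when needed.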
+ if (m_codeBlock->needsFullScopeChain()) {
+ m_activationRegister = addVar();
+ emitInitLazyRegister(m_activationRegister);
+ m_codeBlock->setActivationRegister(m_activationRegister->index());
}
- if (m_codeBlock->needsFullScopeChain()) {
- ++m_codeBlock->m_numVars;
- m_activationRegisterIndex = newRegister()->index();
- emitOpcode(op_enter_with_activation);
- instructions().append(m_activationRegisterIndex);
- } else
- emitOpcode(op_enter);
+ // Both op_tear_off_activation and op_tear_off_arguments tear off the 'arguments'
+ // object, if created.
+ if (m_codeBlock->needsFullScopeChain() || functionBody->usesArguments()) {
+ RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code.
+ RegisterID* argumentsRegister = addVar(propertyNames().arguments, false); // Can be changed by assigning to 'arguments'.
- if (usesArguments) {
- emitOpcode(op_init_arguments);
+ // We can save a little space by hard-coding the knowledge that the two
+ // 'arguments' values are stored in consecutive registers, and storing
+ // only the index of the assignable one.
+ codeBlock->setArgumentsRegister(argumentsRegister->index());
+ ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->index() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister()));
+
+ emitInitLazyRegister(argumentsRegister);
+ emitInitLazyRegister(unmodifiedArgumentsRegister);
+
+ if (m_codeBlock->isStrictMode()) {
+ emitOpcode(op_create_arguments);
+ instructions().append(argumentsRegister->index());
+ }
// The debugger currently retrieves the arguments object from an activation rather than pulling
// it from a call frame. In the long-term it should stop doing that (<rdar://problem/6911886>),
// but for now we force eager creation of the arguments object when debugging.
- if (m_shouldEmitDebugHooks)
+ if (m_shouldEmitDebugHooks) {
emitOpcode(op_create_arguments);
+ instructions().append(argumentsRegister->index());
+ }
}
const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack();
+ const DeclarationStacks::VarStack& varStack = functionBody->varStack();
+
+ // Captured variables and functions go first so that activations don't have
+ // to step over the non-captured locals to mark them.
+ m_hasCreatedActivation = false;
+ if (functionBody->hasCapturedVariables()) {
+ for (size_t i = 0; i < functionStack.size(); ++i) {
+ FunctionBodyNode* function = functionStack[i];
+ const Identifier& ident = function->ident();
+ if (functionBody->captures(ident)) {
+ if (!m_hasCreatedActivation) {
+ m_hasCreatedActivation = true;
+ emitOpcode(op_create_activation);
+ instructions().append(m_activationRegister->index());
+ }
+ m_functions.add(ident.impl());
+ emitNewFunction(addVar(ident, false), function);
+ }
+ }
+ for (size_t i = 0; i < varStack.size(); ++i) {
+ const Identifier& ident = *varStack[i].first;
+ if (functionBody->captures(ident))
+ addVar(ident, varStack[i].second & DeclarationStacks::IsConstant);
+ }
+ }
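+ // Function declarations may be instantiated lazily only when nothing beyond variables needs the activation and the debugger is off.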
+ bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks;
+ if (!canLazilyCreateFunctions && !m_hasCreatedActivation) {
+ m_hasCreatedActivation = true;
+ emitOpcode(op_create_activation);
+ instructions().append(m_activationRegister->index());
+ }
+
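+ // Everything allocated so far is captured; registers added from here up to m_lastLazyFunction may hold lazily instantiated function declarations.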
+ codeBlock->m_numCapturedVars = codeBlock->m_numVars;
+ m_firstLazyFunction = codeBlock->m_numVars;
for (size_t i = 0; i < functionStack.size(); ++i) {
FunctionBodyNode* function = functionStack[i];
const Identifier& ident = function->ident();
- m_functions.add(ident.ustring().rep());
- emitNewFunction(addVar(ident, false), function);
+ if (!functionBody->captures(ident)) {
+ m_functions.add(ident.impl());
+ RefPtr<RegisterID> reg = addVar(ident, false);
+ // Don't lazily create functions that override the name 'arguments'
+ // as this would complicate lazy instantiation of actual arguments.
+ if (!canLazilyCreateFunctions || ident == propertyNames().arguments)
+ emitNewFunction(reg.get(), function);
+ else {
+ emitInitLazyRegister(reg.get());
+ m_lazyFunctions.set(reg->index(), function);
+ }
+ }
+ }
+ m_lastLazyFunction = canLazilyCreateFunctions ? codeBlock->m_numVars : m_firstLazyFunction;
+ for (size_t i = 0; i < varStack.size(); ++i) {
+ const Identifier& ident = *varStack[i].first;
+ if (!functionBody->captures(ident))
+ addVar(ident, varStack[i].second & DeclarationStacks::IsConstant);
}
- const DeclarationStacks::VarStack& varStack = functionBody->varStack();
- for (size_t i = 0; i < varStack.size(); ++i)
- addVar(*varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant);
+ if (m_shouldEmitDebugHooks)
+ codeBlock->m_numCapturedVars = codeBlock->m_numVars;
FunctionParameters& parameters = *functionBody->parameters();
- size_t parameterCount = parameters.size();
- m_nextParameterIndex = -RegisterFile::CallFrameHeaderSize - parameterCount - 1;
- m_parameters.grow(1 + parameterCount); // reserve space for "this"
+ m_parameters.grow(parameters.size() + 1); // reserve space for "this"
// Add "this" as a parameter
- m_thisRegister.setIndex(m_nextParameterIndex);
- ++m_nextParameterIndex;
- ++m_codeBlock->m_numParameters;
+ int nextParameterIndex = CallFrame::thisArgumentOffset();
+ m_thisRegister.setIndex(nextParameterIndex--);
+ m_codeBlock->addParameter();
+
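+ // Explicit parameters occupy the indices just below 'this', counting downward.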
+ for (size_t i = 0; i < parameters.size(); ++i)
+ addParameter(parameters[i], nextParameterIndex--);
+
+ preserveLastVar();
- if (functionBody->usesThis() || m_shouldEmitDebugHooks) {
+ if (isConstructor()) {
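+ // For construct calls, allocate the new 'this' object from the callee's .prototype before the body runs.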
+ RefPtr<RegisterID> func = newTemporary();
+ RefPtr<RegisterID> funcProto = newTemporary();
+
+ emitOpcode(op_get_callee);
+ instructions().append(func->index());
+ // Load prototype.
+ emitGetById(funcProto.get(), func.get(), globalData()->propertyNames->prototype);
+
+ emitOpcode(op_create_this);
+ instructions().append(m_thisRegister.index());
+ instructions().append(funcProto->index());
+ } else if (!codeBlock->isStrictMode() && (functionBody->usesThis() || codeBlock->usesEval() || m_shouldEmitDebugHooks)) {
emitOpcode(op_convert_this);
instructions().append(m_thisRegister.index());
}
-
- for (size_t i = 0; i < parameterCount; ++i)
- addParameter(parameters[i]);
-
- preserveLastVar();
}
-BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock)
- : m_shouldEmitDebugHooks(!!debugger)
- , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
- , m_scopeChain(&scopeChain)
+BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, ScopeChainNode* scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock, CompilationKind)
+ : m_shouldEmitDebugHooks(scopeChain->globalObject->debugger())
+ , m_shouldEmitProfileHooks(scopeChain->globalObject->globalObjectMethodTable()->supportsProfiling(scopeChain->globalObject.get()))
+ , m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get()))
+ , m_scopeChain(*scopeChain->globalData, scopeChain)
, m_symbolTable(symbolTable)
, m_scopeNode(evalNode)
, m_codeBlock(codeBlock)
- , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
+ , m_thisRegister(CallFrame::thisArgumentOffset())
, m_finallyDepth(0)
, m_dynamicScopeDepth(0)
, m_baseScopeDepth(codeBlock->baseScopeDepth())
, m_codeType(EvalCode)
, m_nextConstantOffset(0)
, m_globalConstantIndex(0)
- , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+ , m_hasCreatedActivation(true)
+ , m_firstLazyFunction(0)
+ , m_lastLazyFunction(0)
+ , m_globalData(scopeChain->globalData)
, m_lastOpcodeID(op_end)
- , m_emitNodeDepth(0)
- , m_regeneratingForExceptionInfo(false)
- , m_codeBlockBeingRegeneratedFrom(0)
+#ifndef NDEBUG
+ , m_lastOpcodePosition(0)
+#endif
+ , m_stack(wtfThreadData().stack())
+ , m_usesExceptions(false)
+ , m_expressionTooDeep(false)
{
+ m_globalData->startedCompiling(m_codeBlock);
if (m_shouldEmitDebugHooks || m_baseScopeDepth)
m_codeBlock->setNeedsFullScopeChain(true);
emitOpcode(op_enter);
codeBlock->setGlobalData(m_globalData);
- m_codeBlock->m_numParameters = 1; // Allocate space for "this"
+ m_codeBlock->setNumParameters(1);
const DeclarationStacks::FunctionStack& functionStack = evalNode->functionStack();
for (size_t i = 0; i < functionStack.size(); ++i)
for (size_t i = 0; i < numVariables; ++i)
variables.append(*varStack[i].first);
codeBlock->adoptVariables(variables);
-
+ codeBlock->m_numCapturedVars = codeBlock->m_numVars;
preserveLastVar();
}
-RegisterID* BytecodeGenerator::addParameter(const Identifier& ident)
+BytecodeGenerator::~BytecodeGenerator()
+{
+ m_globalData->finishedCompiling(m_codeBlock);
+}
+
+RegisterID* BytecodeGenerator::emitInitLazyRegister(RegisterID* reg)
+{
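+ // Mark the register as lazily initialized: it starts out empty and is filled in on first use.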
+ emitOpcode(op_init_lazy_reg);
+ instructions().append(reg->index());
+ return reg;
+}
+
+void BytecodeGenerator::addParameter(const Identifier& ident, int parameterIndex)
{
// Parameters overwrite var declarations, but not function declarations.
- RegisterID* result = 0;
- UString::Rep* rep = ident.ustring().rep();
+ StringImpl* rep = ident.impl();
if (!m_functions.contains(rep)) {
- symbolTable().set(rep, m_nextParameterIndex);
- RegisterID& parameter = registerFor(m_nextParameterIndex);
- parameter.setIndex(m_nextParameterIndex);
- result = &parameter;
+ symbolTable().set(rep, parameterIndex);
+ RegisterID& parameter = registerFor(parameterIndex);
+ parameter.setIndex(parameterIndex);
}
// To maintain the calling convention, we have to allocate unique space for
// each parameter, even if the parameter doesn't make it into the symbol table.
- ++m_nextParameterIndex;
- ++m_codeBlock->m_numParameters;
- return result;
+ m_codeBlock->addParameter();
}
RegisterID* BytecodeGenerator::registerFor(const Identifier& ident)
{
if (ident == propertyNames().thisIdentifier)
return &m_thisRegister;
+
+ if (m_codeType == GlobalCode)
+ return 0;
if (!shouldOptimizeLocals())
return 0;
- SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
+ SymbolTableEntry entry = symbolTable().get(ident.impl());
if (entry.isNull())
return 0;
if (ident == propertyNames().arguments)
createArgumentsIfNecessary();
- return &registerFor(entry.getIndex());
+ return createLazyRegisterIfNecessary(&registerFor(entry.getIndex()));
+}
+
+RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
+{
+ if (m_codeType == EvalCode)
+ return 0;
+
+ if (m_codeType == GlobalCode)
+ return 0;
+
+ SymbolTableEntry entry = symbolTable().get(ident.impl());
+ if (entry.isNull())
+ return 0;
+
+ return createLazyRegisterIfNecessary(&registerFor(entry.getIndex()));
}
bool BytecodeGenerator::willResolveToArguments(const Identifier& ident)
if (!shouldOptimizeLocals())
return false;
- SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
+ SymbolTableEntry entry = symbolTable().get(ident.impl());
if (entry.isNull())
return false;
{
ASSERT(willResolveToArguments(propertyNames().arguments));
- SymbolTableEntry entry = symbolTable().get(propertyNames().arguments.ustring().rep());
+ SymbolTableEntry entry = symbolTable().get(propertyNames().arguments.impl());
ASSERT(!entry.isNull());
return &registerFor(entry.getIndex());
}
-RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
+RegisterID* BytecodeGenerator::createLazyRegisterIfNecessary(RegisterID* reg)
{
- if (m_codeType == EvalCode)
- return 0;
-
- SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
- if (entry.isNull())
- return 0;
-
- return &registerFor(entry.getIndex());
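+ // If this register holds a function declaration that was deferred, materialize it now that it is being read.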
+ if (m_lastLazyFunction <= reg->index() || reg->index() < m_firstLazyFunction)
+ return reg;
+ emitLazyNewFunction(reg, m_lazyFunctions.get(reg->index()));
+ return reg;
}
bool BytecodeGenerator::isLocal(const Identifier& ident)
if (ident == propertyNames().thisIdentifier)
return true;
- return shouldOptimizeLocals() && symbolTable().contains(ident.ustring().rep());
+ return shouldOptimizeLocals() && symbolTable().contains(ident.impl());
}
bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
{
- return symbolTable().get(ident.ustring().rep()).isReadOnly();
+ return symbolTable().get(ident.impl()).isReadOnly();
}
RegisterID* BytecodeGenerator::newRegister()
m_labels.removeLast();
// Allocate new label ID.
- m_labels.append(m_codeBlock);
+ m_labels.append(this);
return &m_labels.last();
}
void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)
{
+#ifndef NDEBUG
+ size_t opcodePosition = instructions().size();
+ ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end);
+ m_lastOpcodePosition = opcodePosition;
+#endif
instructions().append(globalData()->interpreter->getOpcode(opcodeID));
m_lastOpcodeID = opcodeID;
}
+ValueProfile* BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)
+{
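+ // When value profiling is enabled, allocate a ValueProfile tied to this opcode's offset; callers append it as the opcode's final operand.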
+#if ENABLE(VALUE_PROFILER)
+ ValueProfile* result = m_codeBlock->addValueProfile(instructions().size());
+#else
+ ValueProfile* result = 0;
+#endif
+ emitOpcode(opcodeID);
+ return result;
+}
+
+void BytecodeGenerator::emitLoopHint()
+{
+#if ENABLE(DFG_JIT)
+ emitOpcode(op_loop_hint);
+#endif
+}
+
void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index)
{
ASSERT(instructions().size() >= 4);
{
ASSERT(instructions().size() >= 4);
instructions().shrink(instructions().size() - 4);
+ m_lastOpcodeID = op_end;
}
void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp()
{
ASSERT(instructions().size() >= 3);
instructions().shrink(instructions().size() - 3);
+ m_lastOpcodeID = op_end;
}
PassRefPtr<Label> BytecodeGenerator::emitJump(Label* target)
instructions().append(target->bind(begin, instructions().size()));
return target;
}
- } else if (m_lastOpcodeID == op_lesseq && !target->isForward()) {
+ } else if (m_lastOpcodeID == op_lesseq) {
int dstIndex;
int src1Index;
int src2Index;
rewindBinaryOp();
size_t begin = instructions().size();
- emitOpcode(op_loop_if_lesseq);
+ emitOpcode(target->isForward() ? op_jlesseq : op_loop_if_lesseq);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_greater) {
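+ // Peephole: when the preceding compare wrote to an otherwise-unused temporary, rewind it and emit a fused compare-and-jump opcode instead.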
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(target->isForward() ? op_jgreater : op_loop_if_greater);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_greatereq) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(target->isForward() ? op_jgreatereq : op_loop_if_greatereq);
instructions().append(src1Index);
instructions().append(src2Index);
instructions().append(target->bind(begin, instructions().size()));
instructions().append(target->bind(begin, instructions().size()));
return target;
}
+ } else if (m_lastOpcodeID == op_greater && target->isForward()) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jngreater);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_greatereq && target->isForward()) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+
+ size_t begin = instructions().size();
+ emitOpcode(op_jngreatereq);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->bind(begin, instructions().size()));
+ return target;
+ }
} else if (m_lastOpcodeID == op_not) {
int dstIndex;
int srcIndex;
emitOpcode(op_jneq_ptr);
instructions().append(cond->index());
- instructions().append(m_scopeChain->globalObject()->d()->callFunction);
+ instructions().append(Instruction(*m_globalData, m_codeBlock->ownerExecutable(), m_scopeChain->globalObject->callFunction()));
instructions().append(target->bind(begin, instructions().size()));
return target;
}
emitOpcode(op_jneq_ptr);
instructions().append(cond->index());
- instructions().append(m_scopeChain->globalObject()->d()->applyFunction);
+ instructions().append(Instruction(*m_globalData, m_codeBlock->ownerExecutable(), m_scopeChain->globalObject->applyFunction()));
instructions().append(target->bind(begin, instructions().size()));
return target;
}
unsigned BytecodeGenerator::addConstant(const Identifier& ident)
{
- UString::Rep* rep = ident.ustring().rep();
- pair<IdentifierMap::iterator, bool> result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
- if (result.second) // new entry
+ StringImpl* rep = ident.impl();
+ IdentifierMap::AddResult result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
+ if (result.isNewEntry)
m_codeBlock->addIdentifier(Identifier(m_globalData, rep));
- return result.first->second;
+ return result.iterator->second;
}
RegisterID* BytecodeGenerator::addConstantValue(JSValue v)
{
int index = m_nextConstantOffset;
- pair<JSValueMap::iterator, bool> result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset);
- if (result.second) {
+ JSValueMap::AddResult result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset);
+ if (result.isNewEntry) {
m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
++m_nextConstantOffset;
- m_codeBlock->addConstantRegister(JSValue(v));
+ m_codeBlock->addConstant(JSValue(v));
} else
- index = result.first->second;
+ index = result.iterator->second;
return &m_constantPoolRegisters[index];
}
if (src1->index() == dstIndex
&& src1->isTemporary()
&& m_codeBlock->isConstantRegisterIndex(src2->index())
- && m_codeBlock->constantRegister(src2->index()).jsValue().isString()) {
- const UString& value = asString(m_codeBlock->constantRegister(src2->index()).jsValue())->tryGetValue();
+ && m_codeBlock->constantRegister(src2->index()).get().isString()) {
+ const UString& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue();
if (value == "undefined") {
rewindUnaryOp();
emitOpcode(op_is_undefined);
RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, double number)
{
- // FIXME: Our hash tables won't hold infinity, so we make a new JSNumberCell each time.
- // Later we can do the extra work to handle that like the other cases.
- if (number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
- return emitLoad(dst, jsNumber(globalData(), number));
- JSValue& valueInMap = m_numberMap.add(number, JSValue()).first->second;
+ // FIXME: Our hash tables won't hold infinity, so we make a new JSValue each time.
+ // Later we can do the extra work to handle that like the other cases. They also don't
+ // work correctly with NaN as a key.
+ if (isnan(number) || number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
+ return emitLoad(dst, jsNumber(number));
+ JSValue& valueInMap = m_numberMap.add(number, JSValue()).iterator->second;
if (!valueInMap)
- valueInMap = jsNumber(globalData(), number);
+ valueInMap = jsNumber(number);
return emitLoad(dst, valueInMap);
}
RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, const Identifier& identifier)
{
- JSString*& stringInMap = m_stringMap.add(identifier.ustring().rep(), 0).first->second;
+ JSString*& stringInMap = m_stringMap.add(identifier.impl(), 0).iterator->second;
if (!stringInMap)
stringInMap = jsOwnedString(globalData(), identifier.ustring());
return emitLoad(dst, JSValue(stringInMap));
return constantID;
}
-bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& index, size_t& stackDepth, bool forWriting, JSObject*& globalObject)
+bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& index, size_t& stackDepth, bool forWriting, bool& requiresDynamicChecks, JSObject*& globalObject)
{
// Cases where we cannot statically optimize the lookup.
if (property == propertyNames().arguments || !canOptimizeNonLocals()) {
if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
ScopeChainIterator iter = m_scopeChain->begin();
- globalObject = *iter;
+ globalObject = iter->get();
ASSERT((++iter) == m_scopeChain->end());
}
return false;
}
size_t depth = 0;
-
+ requiresDynamicChecks = false;
ScopeChainIterator iter = m_scopeChain->begin();
ScopeChainIterator end = m_scopeChain->end();
for (; iter != end; ++iter, ++depth) {
- JSObject* currentScope = *iter;
+ JSObject* currentScope = iter->get();
if (!currentScope->isVariableObject())
break;
- JSVariableObject* currentVariableObject = static_cast<JSVariableObject*>(currentScope);
- SymbolTableEntry entry = currentVariableObject->symbolTable().get(property.ustring().rep());
+ JSVariableObject* currentVariableObject = jsCast<JSVariableObject*>(currentScope);
+ SymbolTableEntry entry = currentVariableObject->symbolTable().get(property.impl());
// Found the property
if (!entry.isNull()) {
globalObject = currentVariableObject;
return false;
}
- stackDepth = depth;
+ stackDepth = depth + m_codeBlock->needsFullScopeChain();
index = entry.getIndex();
if (++iter == end)
globalObject = currentVariableObject;
return true;
}
- if (currentVariableObject->isDynamicScope())
+ bool scopeRequiresDynamicChecks = false;
+ if (currentVariableObject->isDynamicScope(scopeRequiresDynamicChecks))
break;
+ requiresDynamicChecks |= scopeRequiresDynamicChecks;
}
-
// Can't locate the property but we're able to avoid a few lookups.
- stackDepth = depth;
+ stackDepth = depth + m_codeBlock->needsFullScopeChain();
index = missingSymbolMarker();
- JSObject* scope = *iter;
+ JSObject* scope = iter->get();
if (++iter == end)
globalObject = scope;
return true;
}
+void BytecodeGenerator::emitCheckHasInstance(RegisterID* base)
+{
+ emitOpcode(op_check_has_instance);
+ instructions().append(base->index());
+}
+
RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype)
{
emitOpcode(op_instanceof);
return dst;
}
+static const unsigned maxGlobalResolves = 128;
+
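+// Past this many op_resolve_global entries in one code block, the resolve emitters fall back to the generic path.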
+bool BytecodeGenerator::shouldAvoidResolveGlobal()
+{
+ return m_codeBlock->globalResolveInfoCount() > maxGlobalResolves && !m_labelScopes.size();
+}
+
RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const Identifier& property)
{
size_t depth = 0;
int index = 0;
JSObject* globalObject = 0;
- if (!findScopedProperty(property, index, depth, false, globalObject) && !globalObject) {
+ bool requiresDynamicChecks = false;
+ if (!findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject) && !globalObject) {
// We can't optimise at all :-(
- emitOpcode(op_resolve);
+ ValueProfile* profile = emitProfiledOpcode(op_resolve);
instructions().append(dst->index());
instructions().append(addConstant(property));
+ instructions().append(profile);
return dst;
}
-
+ if (shouldAvoidResolveGlobal()) {
+ globalObject = 0;
+ requiresDynamicChecks = true;
+ }
+
if (globalObject) {
- bool forceGlobalResolve = false;
- if (m_regeneratingForExceptionInfo) {
-#if ENABLE(JIT)
- forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInfoAtBytecodeOffset(instructions().size());
-#else
- forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInstructionAtBytecodeOffset(instructions().size());
-#endif
- }
-
- if (index != missingSymbolMarker() && !forceGlobalResolve) {
+ if (index != missingSymbolMarker() && !requiresDynamicChecks) {
// Directly index the property lookup across multiple scopes.
return emitGetScopedVar(dst, depth, index, globalObject);
}
#if ENABLE(JIT)
m_codeBlock->addGlobalResolveInfo(instructions().size());
-#else
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
#endif
- emitOpcode(op_resolve_global);
+ m_codeBlock->addGlobalResolveInstruction(instructions().size());
+ ValueProfile* profile = emitProfiledOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
instructions().append(dst->index());
- instructions().append(globalObject);
instructions().append(addConstant(property));
instructions().append(0);
instructions().append(0);
+ if (requiresDynamicChecks)
+ instructions().append(depth);
+ instructions().append(profile);
+ return dst;
+ }
+
+ if (requiresDynamicChecks) {
+ // If we get here we have an eval nested inside a |with|, so just give up.
+ ValueProfile* profile = emitProfiledOpcode(op_resolve);
+ instructions().append(dst->index());
+ instructions().append(addConstant(property));
+ instructions().append(profile);
return dst;
}
// In this case we are at least able to drop a few scope chains from the
// lookup chain, although we still need to hash from then on.
- emitOpcode(op_resolve_skip);
+ ValueProfile* profile = emitProfiledOpcode(op_resolve_skip);
instructions().append(dst->index());
instructions().append(addConstant(property));
instructions().append(depth);
+ instructions().append(profile);
return dst;
}
RegisterID* BytecodeGenerator::emitGetScopedVar(RegisterID* dst, size_t depth, int index, JSValue globalObject)
{
if (globalObject) {
- emitOpcode(op_get_global_var);
+ if (m_lastOpcodeID == op_put_global_var) {
+ int dstIndex;
+ int srcIndex;
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (dstIndex == index && srcIndex == dst->index())
+ return dst;
+ }
+
+ ValueProfile* profile = emitProfiledOpcode(op_get_global_var);
instructions().append(dst->index());
- instructions().append(asCell(globalObject));
instructions().append(index);
+ instructions().append(profile);
return dst;
}
- emitOpcode(op_get_scoped_var);
+ ValueProfile* profile = emitProfiledOpcode(op_get_scoped_var);
instructions().append(dst->index());
instructions().append(index);
instructions().append(depth);
+ instructions().append(profile);
return dst;
}
{
if (globalObject) {
emitOpcode(op_put_global_var);
- instructions().append(asCell(globalObject));
instructions().append(index);
instructions().append(value->index());
return value;
size_t depth = 0;
int index = 0;
JSObject* globalObject = 0;
- findScopedProperty(property, index, depth, false, globalObject);
- if (!globalObject) {
+ bool requiresDynamicChecks = false;
+ findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject);
+ if (!globalObject || requiresDynamicChecks) {
// We can't optimise at all :-(
- emitOpcode(op_resolve_base);
+ ValueProfile* profile = emitProfiledOpcode(op_resolve_base);
instructions().append(dst->index());
instructions().append(addConstant(property));
+ instructions().append(false);
+ instructions().append(profile);
return dst;
}
return emitLoad(dst, JSValue(globalObject));
}
+RegisterID* BytecodeGenerator::emitResolveBaseForPut(RegisterID* dst, const Identifier& property)
+{
+ if (!m_codeBlock->isStrictMode())
+ return emitResolveBase(dst, property);
+ size_t depth = 0;
+ int index = 0;
+ JSObject* globalObject = 0;
+ bool requiresDynamicChecks = false;
+ findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject);
+ if (!globalObject || requiresDynamicChecks) {
+ // We can't optimise at all :-(
+ ValueProfile* profile = emitProfiledOpcode(op_resolve_base);
+ instructions().append(dst->index());
+ instructions().append(addConstant(property));
+ instructions().append(true);
+ instructions().append(profile);
+ return dst;
+ }
+
+ // Global object is the base
+ RefPtr<RegisterID> result = emitLoad(dst, JSValue(globalObject));
+ emitOpcode(op_ensure_property_exists);
+ instructions().append(dst->index());
+ instructions().append(addConstant(property));
+ return result.get();
+}
+
RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
{
size_t depth = 0;
int index = 0;
JSObject* globalObject = 0;
- if (!findScopedProperty(property, index, depth, false, globalObject) || !globalObject) {
+ bool requiresDynamicChecks = false;
+ if (!findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject) || !globalObject || requiresDynamicChecks) {
// We can't optimise at all :-(
- emitOpcode(op_resolve_with_base);
+ ValueProfile* profile = emitProfiledOpcode(op_resolve_with_base);
instructions().append(baseDst->index());
instructions().append(propDst->index());
instructions().append(addConstant(property));
+ instructions().append(profile);
return baseDst;
}
bool forceGlobalResolve = false;
- if (m_regeneratingForExceptionInfo) {
+
+ // Global object is the base
+ emitLoad(baseDst, JSValue(globalObject));
+
+ if (index != missingSymbolMarker() && !forceGlobalResolve) {
+ // Directly index the property lookup across multiple scopes.
+ emitGetScopedVar(propDst, depth, index, globalObject);
+ return baseDst;
+ }
+ if (shouldAvoidResolveGlobal()) {
+ ValueProfile* profile = emitProfiledOpcode(op_resolve);
+ instructions().append(propDst->index());
+ instructions().append(addConstant(property));
+ instructions().append(profile);
+ return baseDst;
+ }
#if ENABLE(JIT)
- forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInfoAtBytecodeOffset(instructions().size());
-#else
- forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInstructionAtBytecodeOffset(instructions().size());
+ m_codeBlock->addGlobalResolveInfo(instructions().size());
#endif
+#if ENABLE(CLASSIC_INTERPRETER)
+ m_codeBlock->addGlobalResolveInstruction(instructions().size());
+#endif
+ ValueProfile* profile = emitProfiledOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
+ instructions().append(propDst->index());
+ instructions().append(addConstant(property));
+ instructions().append(0);
+ instructions().append(0);
+ if (requiresDynamicChecks)
+ instructions().append(depth);
+ instructions().append(profile);
+ return baseDst;
+}
+
+RegisterID* BytecodeGenerator::emitResolveWithThis(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
+{
+ size_t depth = 0;
+ int index = 0;
+ JSObject* globalObject = 0;
+ bool requiresDynamicChecks = false;
+ if (!findScopedProperty(property, index, depth, false, requiresDynamicChecks, globalObject) || !globalObject || requiresDynamicChecks) {
+ // We can't optimise at all :-(
+ ValueProfile* profile = emitProfiledOpcode(op_resolve_with_this);
+ instructions().append(baseDst->index());
+ instructions().append(propDst->index());
+ instructions().append(addConstant(property));
+ instructions().append(profile);
+ return baseDst;
}
+ bool forceGlobalResolve = false;
+
// Global object is the base
- emitLoad(baseDst, JSValue(globalObject));
+ emitLoad(baseDst, jsUndefined());
if (index != missingSymbolMarker() && !forceGlobalResolve) {
// Directly index the property lookup across multiple scopes.
emitGetScopedVar(propDst, depth, index, globalObject);
return baseDst;
}
-
+ if (shouldAvoidResolveGlobal()) {
+ ValueProfile* profile = emitProfiledOpcode(op_resolve);
+ instructions().append(propDst->index());
+ instructions().append(addConstant(property));
+ instructions().append(profile);
+ return baseDst;
+ }
#if ENABLE(JIT)
m_codeBlock->addGlobalResolveInfo(instructions().size());
-#else
+#endif
+#if ENABLE(CLASSIC_INTERPRETER)
m_codeBlock->addGlobalResolveInstruction(instructions().size());
#endif
- emitOpcode(op_resolve_global);
+ ValueProfile* profile = emitProfiledOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
instructions().append(propDst->index());
- instructions().append(globalObject);
instructions().append(addConstant(property));
instructions().append(0);
instructions().append(0);
+ if (requiresDynamicChecks)
+ instructions().append(depth);
+ instructions().append(profile);
return baseDst;
}
RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
{
-#if ENABLE(JIT)
- m_codeBlock->addStructureStubInfo(StructureStubInfo(access_get_by_id));
-#else
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
- emitOpcode(op_get_by_id);
+ ValueProfile* profile = emitProfiledOpcode(op_get_by_id);
instructions().append(dst->index());
instructions().append(base->index());
instructions().append(addConstant(property));
instructions().append(0);
instructions().append(0);
instructions().append(0);
+ instructions().append(profile);
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitGetArgumentsLength(RegisterID* dst, RegisterID* base)
+{
+ emitOpcode(op_get_arguments_length);
+ instructions().append(dst->index());
+ ASSERT(base->index() == m_codeBlock->argumentsRegister());
+ instructions().append(base->index());
+ instructions().append(addConstant(propertyNames().length));
return dst;
}
RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
-#if ENABLE(JIT)
- m_codeBlock->addStructureStubInfo(StructureStubInfo(access_put_by_id));
-#else
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
emitOpcode(op_put_by_id);
instructions().append(base->index());
instructions().append(0);
instructions().append(0);
instructions().append(0);
+ instructions().append(0);
return value;
}
-RegisterID* BytecodeGenerator::emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value)
+RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
- emitOpcode(op_put_getter);
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+
+ emitOpcode(op_put_by_id);
instructions().append(base->index());
instructions().append(addConstant(property));
instructions().append(value->index());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(property != m_globalData->propertyNames->underscoreProto);
return value;
}
-RegisterID* BytecodeGenerator::emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value)
+void BytecodeGenerator::emitPutGetterSetter(RegisterID* base, const Identifier& property, RegisterID* getter, RegisterID* setter)
{
- emitOpcode(op_put_setter);
+ emitOpcode(op_put_getter_setter);
instructions().append(base->index());
instructions().append(addConstant(property));
- instructions().append(value->index());
- return value;
+ instructions().append(getter->index());
+ instructions().append(setter->index());
}
RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier& property)
return dst;
}
+RegisterID* BytecodeGenerator::emitGetArgumentByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
+{
+ ValueProfile* profile = emitProfiledOpcode(op_get_argument_by_val);
+ instructions().append(dst->index());
+ ASSERT(base->index() == m_codeBlock->argumentsRegister());
+ instructions().append(base->index());
+ instructions().append(property->index());
+ instructions().append(profile);
+ return dst;
+}
+
RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
{
for (size_t i = m_forInContextStack.size(); i > 0; i--) {
return dst;
}
}
- emitOpcode(op_get_by_val);
+ ValueProfile* profile = emitProfiledOpcode(op_get_by_val);
instructions().append(dst->index());
instructions().append(base->index());
instructions().append(property->index());
+ instructions().append(profile);
return dst;
}
return dst;
}
-RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elements)
+unsigned BytecodeGenerator::addConstantBuffer(unsigned length)
+{
+ return m_codeBlock->addConstantBuffer(length);
+}
+
+JSString* BytecodeGenerator::addStringConstant(const Identifier& identifier)
{
+ JSString*& stringInMap = m_stringMap.add(identifier.impl(), 0).iterator->second;
+ if (!stringInMap) {
+ stringInMap = jsString(globalData(), identifier.ustring());
+ addConstantValue(stringInMap);
+ }
+ return stringInMap;
+}
+
+RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elements, unsigned length)
+{
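+ // Fast path: when the leading elements are all numeric or string literals with no elisions, build a constant buffer once and emit op_new_array_buffer instead of filling the array element by element.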
+#if !ASSERT_DISABLED
+ unsigned checkLength = 0;
+#endif
+ bool hadVariableExpression = false;
+ if (length) {
+ for (ElementNode* n = elements; n; n = n->next()) {
+ if (!n->value()->isNumber() && !n->value()->isString()) {
+ hadVariableExpression = true;
+ break;
+ }
+ if (n->elision())
+ break;
+#if !ASSERT_DISABLED
+ checkLength++;
+#endif
+ }
+ if (!hadVariableExpression) {
+ ASSERT(length == checkLength);
+ unsigned constantBufferIndex = addConstantBuffer(length);
+ JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex);
+ unsigned index = 0;
+ for (ElementNode* n = elements; index < length; n = n->next()) {
+ if (n->value()->isNumber())
+ constantBuffer[index++] = jsNumber(static_cast<NumberNode*>(n->value())->value());
+ else {
+ ASSERT(n->value()->isString());
+ constantBuffer[index++] = addStringConstant(static_cast<StringNode*>(n->value())->value());
+ }
+ }
+ emitOpcode(op_new_array_buffer);
+ instructions().append(dst->index());
+ instructions().append(constantBufferIndex);
+ instructions().append(length);
+ return dst;
+ }
+ }
+
Vector<RefPtr<RegisterID>, 16> argv;
for (ElementNode* n = elements; n; n = n->next()) {
if (n->elision())
RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FunctionBodyNode* function)
{
- unsigned index = m_codeBlock->addFunctionDecl(makeFunction(m_globalData, function));
+ return emitNewFunctionInternal(dst, m_codeBlock->addFunctionDecl(makeFunction(m_globalData, function)), false);
+}
+
+RegisterID* BytecodeGenerator::emitLazyNewFunction(RegisterID* dst, FunctionBodyNode* function)
+{
+ FunctionOffsetMap::AddResult ptr = m_functionOffsets.add(function, 0);
+ if (ptr.isNewEntry)
+ ptr.iterator->second = m_codeBlock->addFunctionDecl(makeFunction(m_globalData, function));
+ return emitNewFunctionInternal(dst, ptr.iterator->second, true);
+}
+RegisterID* BytecodeGenerator::emitNewFunctionInternal(RegisterID* dst, unsigned index, bool doNullCheck)
+{
+ createActivationIfNecessary();
emitOpcode(op_new_func);
instructions().append(dst->index());
instructions().append(index);
+ instructions().append(doNullCheck);
return dst;
}
return dst;
}
-
RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* r0, FuncExprNode* n)
{
FunctionBodyNode* function = n->body();
unsigned index = m_codeBlock->addFunctionExpr(makeFunction(m_globalData, function));
-
+
+ createActivationIfNecessary();
emitOpcode(op_new_func_exp);
instructions().append(r0->index());
instructions().append(index);
return r0;
}
-RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
{
- return emitCall(op_call, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
+ return emitCall(op_call, dst, func, callArguments, divot, startOffset, endOffset);
}
void BytecodeGenerator::createArgumentsIfNecessary()
{
- if (m_codeBlock->usesArguments() && m_codeType == FunctionCode)
- emitOpcode(op_create_arguments);
+ if (m_codeType != FunctionCode)
+ return;
+
+ if (!m_codeBlock->usesArguments())
+ return;
+
+ // If we're in strict mode we tear off the arguments on function
+ // entry, so there's no need to check whether we need to create
+ // them now.
+ if (m_codeBlock->isStrictMode())
+ return;
+
+ emitOpcode(op_create_arguments);
+ instructions().append(m_codeBlock->argumentsRegister());
+}
+
+void BytecodeGenerator::createActivationIfNecessary()
+{
+ if (m_hasCreatedActivation)
+ return;
+ if (!m_codeBlock->needsFullScopeChain())
+ return;
+ emitOpcode(op_create_activation);
+ instructions().append(m_activationRegister->index());
}
-RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
{
- createArgumentsIfNecessary();
- return emitCall(op_call_eval, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
+ return emitCall(op_call_eval, dst, func, callArguments, divot, startOffset, endOffset);
}
-RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
{
ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
ASSERT(func->refCount());
- ASSERT(thisRegister->refCount());
- RegisterID* originalFunc = func;
- if (m_shouldEmitProfileHooks) {
- // If codegen decided to recycle func as this call's destination register,
- // we need to undo that optimization here so that func will still be around
- // for the sake of op_profile_did_call.
- if (dst == func) {
- RefPtr<RegisterID> movedThisRegister = emitMove(newTemporary(), thisRegister);
- RefPtr<RegisterID> movedFunc = emitMove(thisRegister, func);
-
- thisRegister = movedThisRegister.release().releaseRef();
- func = movedFunc.release().releaseRef();
- }
- }
+ if (m_shouldEmitProfileHooks)
+ emitMove(callArguments.profileHookRegister(), func);
// Generate code for arguments.
- Vector<RefPtr<RegisterID>, 16> argv;
- argv.append(thisRegister);
- for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next) {
- argv.append(newTemporary());
- // op_call requires the arguments to be a sequential range of registers
- ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
- emitNode(argv.last().get(), n);
- }
+ unsigned argument = 0;
+ for (ArgumentListNode* n = callArguments.argumentsNode()->m_listNode; n; n = n->m_next)
+ emitNode(callArguments.argumentRegister(argument++), n);
// Reserve space for call frame.
Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_will_call);
- instructions().append(func->index());
-
-#if ENABLE(JIT)
- m_codeBlock->addFunctionRegisterInfo(instructions().size(), func->index());
-#endif
+ instructions().append(callArguments.profileHookRegister()->index());
}
emitExpressionInfo(divot, startOffset, endOffset);
-#if ENABLE(JIT)
- m_codeBlock->addCallLinkInfo();
-#endif
-
// Emit call.
emitOpcode(opcodeID);
- instructions().append(dst->index()); // dst
instructions().append(func->index()); // func
- instructions().append(argv.size()); // argCount
- instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
+ instructions().append(callArguments.argumentCountIncludingThis()); // argCount
+ instructions().append(callArguments.registerOffset()); // registerOffset
+#if ENABLE(LLINT)
+ instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+#else
+ instructions().append(0);
+#endif
+ instructions().append(0);
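+ // The call result is written by a separate op_call_put_result, emitted only when the caller actually wants the value.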
+ if (dst != ignoredResult()) {
+ ValueProfile* profile = emitProfiledOpcode(op_call_put_result);
+ instructions().append(dst->index()); // dst
+ instructions().append(profile);
+ }
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
- instructions().append(func->index());
-
- if (dst == originalFunc) {
- thisRegister->deref();
- func->deref();
- }
+ instructions().append(callArguments.profileHookRegister()->index());
}
return dst;
}
-RegisterID* BytecodeGenerator::emitLoadVarargs(RegisterID* argCountDst, RegisterID* arguments)
-{
- ASSERT(argCountDst->index() < arguments->index());
- emitOpcode(op_load_varargs);
- instructions().append(argCountDst->index());
- instructions().append(arguments->index());
- return argCountDst;
-}
-
-RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* argCountRegister, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, RegisterID* profileHookRegister, unsigned divot, unsigned startOffset, unsigned endOffset)
{
- ASSERT(func->refCount());
- ASSERT(thisRegister->refCount());
- ASSERT(dst != func);
if (m_shouldEmitProfileHooks) {
+ emitMove(profileHookRegister, func);
emitOpcode(op_profile_will_call);
- instructions().append(func->index());
-
-#if ENABLE(JIT)
- m_codeBlock->addFunctionRegisterInfo(instructions().size(), func->index());
-#endif
+ instructions().append(profileHookRegister->index());
}
emitExpressionInfo(divot, startOffset, endOffset);
-
+
// Emit call.
emitOpcode(op_call_varargs);
- instructions().append(dst->index()); // dst
- instructions().append(func->index()); // func
- instructions().append(argCountRegister->index()); // arg count
- instructions().append(thisRegister->index() + RegisterFile::CallFrameHeaderSize); // initial registerOffset
+ instructions().append(func->index());
+ instructions().append(thisRegister->index());
+ instructions().append(arguments->index());
+ instructions().append(firstFreeRegister->index());
+ if (dst != ignoredResult()) {
+ ValueProfile* profile = emitProfiledOpcode(op_call_put_result);
+ instructions().append(dst->index());
+ instructions().append(profile);
+ }
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
- instructions().append(func->index());
+ instructions().append(profileHookRegister->index());
}
return dst;
}
RegisterID* BytecodeGenerator::emitReturn(RegisterID* src)
{
if (m_codeBlock->needsFullScopeChain()) {
emitOpcode(op_tear_off_activation);
- instructions().append(m_activationRegisterIndex);
- } else if (m_codeBlock->usesArguments() && m_codeBlock->m_numParameters > 1)
+ instructions().append(m_activationRegister->index());
+ instructions().append(m_codeBlock->argumentsRegister());
+ } else if (m_codeBlock->usesArguments() && m_codeBlock->numParameters() != 1 && !m_codeBlock->isStrictMode()) {
emitOpcode(op_tear_off_arguments);
+ instructions().append(m_codeBlock->argumentsRegister());
+ }
+ // Constructors use op_ret_object_or_this to check that the result is an
+ // object, unless we can trivially determine that the check is unnecessary
+ // (currently, only when the return value is 'this').
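+ // For example, `function F() { return 5; }; new F()` still yields the newly
+ // constructed object, whereas `function G() { return {}; }; new G()` yields
+ // the returned object literal.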
+ if (isConstructor() && (src->index() != m_thisRegister.index())) {
+ emitOpcode(op_ret_object_or_this);
+ instructions().append(src->index());
+ instructions().append(m_thisRegister.index());
+ return src;
+ }
return emitUnaryNoDstOp(op_ret, src);
}
return src;
}
-RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, CallArguments& callArguments, unsigned divot, unsigned startOffset, unsigned endOffset)
{
ASSERT(func->refCount());
- RegisterID* originalFunc = func;
- if (m_shouldEmitProfileHooks) {
- // If codegen decided to recycle func as this call's destination register,
- // we need to undo that optimization here so that func will still be around
- // for the sake of op_profile_did_call.
- if (dst == func) {
- RefPtr<RegisterID> movedFunc = emitMove(newTemporary(), func);
- func = movedFunc.release().releaseRef();
- }
- }
-
- RefPtr<RegisterID> funcProto = newTemporary();
+ if (m_shouldEmitProfileHooks)
+ emitMove(callArguments.profileHookRegister(), func);
// Generate code for arguments.
- Vector<RefPtr<RegisterID>, 16> argv;
- argv.append(newTemporary()); // reserve space for "this"
- for (ArgumentListNode* n = argumentsNode ? argumentsNode->m_listNode : 0; n; n = n->m_next) {
- argv.append(newTemporary());
- // op_construct requires the arguments to be a sequential range of registers
- ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
- emitNode(argv.last().get(), n);
+ unsigned argument = 0;
+ if (ArgumentsNode* argumentsNode = callArguments.argumentsNode()) {
+ for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next)
+ emitNode(callArguments.argumentRegister(argument++), n);
}
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_will_call);
- instructions().append(func->index());
+ instructions().append(callArguments.profileHookRegister()->index());
}
- // Load prototype.
- emitExpressionInfo(divot, startOffset, endOffset);
- emitGetByIdExceptionInfo(op_construct);
- emitGetById(funcProto.get(), func, globalData()->propertyNames->prototype);
-
// Reserve space for call frame.
Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
callFrame.append(newTemporary());
emitExpressionInfo(divot, startOffset, endOffset);
-#if ENABLE(JIT)
- m_codeBlock->addCallLinkInfo();
-#endif
-
emitOpcode(op_construct);
- instructions().append(dst->index()); // dst
instructions().append(func->index()); // func
- instructions().append(argv.size()); // argCount
- instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
- instructions().append(funcProto->index()); // proto
- instructions().append(argv[0]->index()); // thisRegister
-
- emitOpcode(op_construct_verify);
- instructions().append(dst->index());
- instructions().append(argv[0]->index());
+ instructions().append(callArguments.argumentCountIncludingThis()); // argCount
+ instructions().append(callArguments.registerOffset()); // registerOffset
+#if ENABLE(LLINT)
+ instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+#else
+ instructions().append(0);
+#endif
+ instructions().append(0);
+ if (dst != ignoredResult()) {
+ ValueProfile* profile = emitProfiledOpcode(op_call_put_result);
+ instructions().append(dst->index()); // dst
+ instructions().append(profile);
+ }
if (m_shouldEmitProfileHooks) {
emitOpcode(op_profile_did_call);
- instructions().append(func->index());
-
- if (dst == originalFunc)
- func->deref();
+ instructions().append(callArguments.profileHookRegister()->index());
}
return dst;
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
m_dynamicScopeDepth++;
- createArgumentsIfNecessary();
return emitUnaryNoDstOp(op_push_scope, scope);
}
void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, int firstLine, int lastLine)
{
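+ // With DEBUG_WITH_BREAKPOINT enabled, only DidReachBreakpoint hooks are emitted;
+ // otherwise, debug hooks are emitted only if the generator was asked to emit them.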
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ if (debugHookID != DidReachBreakpoint)
+ return;
+#else
if (!m_shouldEmitDebugHooks)
return;
+#endif
emitOpcode(op_debug);
instructions().append(debugHookID);
instructions().append(firstLine);
instructions().append(lastLine);
}
-void BytecodeGenerator::pushFinallyContext(Label* target, RegisterID* retAddrDst)
+void BytecodeGenerator::pushFinallyContext(StatementNode* finallyBlock)
{
ControlFlowContext scope;
scope.isFinallyBlock = true;
- FinallyContext context = { target, retAddrDst };
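+ // Record the sizes of the scope, switch, for-in, and label-scope stacks, plus the
+ // current finally and dynamic-scope depths, so the finally block can later be
+ // emitted against this exact generator state.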
+ FinallyContext context = {
+ finallyBlock,
+ m_scopeContextStack.size(),
+ m_switchContextStack.size(),
+ m_forInContextStack.size(),
+ m_labelScopes.size(),
+ m_finallyDepth,
+ m_dynamicScopeDepth
+ };
scope.finallyContext = context;
m_scopeContextStack.append(scope);
m_finallyDepth++;
instructions().append(nextInsn->bind(begin, instructions().size()));
emitLabel(nextInsn.get());
}
-
+
+ Vector<ControlFlowContext> savedScopeContextStack;
+ Vector<SwitchInfo> savedSwitchContextStack;
+ Vector<ForInContext> savedForInContextStack;
+ SegmentedVector<LabelScope, 8> savedLabelScopes;
while (topScope > bottomScope && topScope->isFinallyBlock) {
- emitJumpSubroutine(topScope->finallyContext.retAddrDst, topScope->finallyContext.finallyAddr);
+ // Save the current state of the world while instating the state of the world
+ // for the finally block.
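+ // The finally block's bytecode is re-emitted inline at each jump that exits
+ // through it, so the generator's stacks must temporarily match their state from
+ // when the block was pushed; everything is restored below.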
+ FinallyContext finallyContext = topScope->finallyContext;
+ bool flipScopes = finallyContext.scopeContextStackSize != m_scopeContextStack.size();
+ bool flipSwitches = finallyContext.switchContextStackSize != m_switchContextStack.size();
+ bool flipForIns = finallyContext.forInContextStackSize != m_forInContextStack.size();
+ bool flipLabelScopes = finallyContext.labelScopesSize != m_labelScopes.size();
+ int topScopeIndex = -1;
+ int bottomScopeIndex = -1;
+ if (flipScopes) {
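+ // topScope and bottomScope point into m_scopeContextStack, so remember them as
+ // indices before the stack is copied and shrunk; they are re-derived once the
+ // stack is restored.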
+ topScopeIndex = topScope - m_scopeContextStack.begin();
+ bottomScopeIndex = bottomScope - m_scopeContextStack.begin();
+ savedScopeContextStack = m_scopeContextStack;
+ m_scopeContextStack.shrink(finallyContext.scopeContextStackSize);
+ }
+ if (flipSwitches) {
+ savedSwitchContextStack = m_switchContextStack;
+ m_switchContextStack.shrink(finallyContext.switchContextStackSize);
+ }
+ if (flipForIns) {
+ savedForInContextStack = m_forInContextStack;
+ m_forInContextStack.shrink(finallyContext.forInContextStackSize);
+ }
+ if (flipLabelScopes) {
+ savedLabelScopes = m_labelScopes;
+ while (m_labelScopes.size() > finallyContext.labelScopesSize)
+ m_labelScopes.removeLast();
+ }
+ int savedFinallyDepth = m_finallyDepth;
+ m_finallyDepth = finallyContext.finallyDepth;
+ int savedDynamicScopeDepth = m_dynamicScopeDepth;
+ m_dynamicScopeDepth = finallyContext.dynamicScopeDepth;
+
+ // Emit the finally block.
+ emitNode(finallyContext.finallyBlock);
+
+ // Restore the state of the world.
+ if (flipScopes) {
+ m_scopeContextStack = savedScopeContextStack;
+ topScope = &m_scopeContextStack[topScopeIndex]; // assert it's within bounds
+ bottomScope = m_scopeContextStack.begin() + bottomScopeIndex; // don't assert, since the index might be -1.
+ }
+ if (flipSwitches)
+ m_switchContextStack = savedSwitchContextStack;
+ if (flipForIns)
+ m_forInContextStack = savedForInContextStack;
+ if (flipLabelScopes)
+ m_labelScopes = savedLabelScopes;
+ m_finallyDepth = savedFinallyDepth;
+ m_dynamicScopeDepth = savedDynamicScopeDepth;
+
--topScope;
}
}
RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* start, Label* end)
{
+ m_usesExceptions = true;
#if ENABLE(JIT)
+#if ENABLE(LLINT)
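+ // With the LLInt enabled, the handler's native code target is the LLInt's
+ // op_catch implementation.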
+ HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(bitwise_cast<void*>(&llint_op_catch))) };
+#else
HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel() };
+#endif
#else
HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth };
#endif
return targetRegister;
}
-RegisterID* BytecodeGenerator::emitNewError(RegisterID* dst, ErrorType type, JSValue message)
+void BytecodeGenerator::emitThrowReferenceError(const UString& message)
{
- emitOpcode(op_new_error);
- instructions().append(dst->index());
- instructions().append(static_cast<int>(type));
- instructions().append(addConstantValue(message)->index());
- return dst;
-}
-
-PassRefPtr<Label> BytecodeGenerator::emitJumpSubroutine(RegisterID* retAddrDst, Label* finally)
-{
- size_t begin = instructions().size();
-
- emitOpcode(op_jsr);
- instructions().append(retAddrDst->index());
- instructions().append(finally->bind(begin, instructions().size()));
- emitLabel(newLabel().get()); // Record the fact that the next instruction is implicitly labeled, because op_sret will return to it.
- return finally;
-}
-
-void BytecodeGenerator::emitSubroutineReturn(RegisterID* retAddrSrc)
-{
- emitOpcode(op_sret);
- instructions().append(retAddrSrc->index());
+ emitOpcode(op_throw_reference_error);
+ instructions().append(addConstantValue(jsString(globalData(), message))->index());
}
void BytecodeGenerator::emitPushNewScope(RegisterID* dst, const Identifier& property, RegisterID* value)
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
m_dynamicScopeDepth++;
-
- createArgumentsIfNecessary();
emitOpcode(op_push_new_scope);
instructions().append(dst->index());
{
UNUSED_PARAM(max);
ASSERT(node->isString());
- UString::Rep* clause = static_cast<StringNode*>(node)->value().ustring().rep();
- ASSERT(clause->size() == 1);
+ StringImpl* clause = static_cast<StringNode*>(node)->value().impl();
+ ASSERT(clause->length() == 1);
- int32_t key = clause->data()[0];
+ int32_t key = (*clause)[0];
ASSERT(key >= min);
ASSERT(key <= max);
return key - min;
ASSERT(!labels[i]->isForward());
ASSERT(nodes[i]->isString());
- UString::Rep* clause = static_cast<StringNode*>(nodes[i])->value().ustring().rep();
+ StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl();
OffsetLocation location;
location.branchOffset = labels[i]->bind(switchAddress, switchAddress + 3);
jumpTable.offsetTable.add(clause, location);
// And we could make the caller pass the node pointer in, if there was some way of getting
// that from an arbitrary node. However, calling emitExpressionInfo without any useful data
// is still good enough to get us an accurate line number.
- emitExpressionInfo(0, 0, 0);
- RegisterID* exception = emitNewError(newTemporary(), SyntaxError, jsString(globalData(), "Expression too deep"));
- emitThrow(exception);
- return exception;
+ m_expressionTooDeep = true;
+ return newTemporary();
+}
+
+void BytecodeGenerator::setIsNumericCompareFunction(bool isNumericCompareFunction)
+{
+ m_codeBlock->setIsNumericCompareFunction(isNumericCompareFunction);
+}
+
+bool BytecodeGenerator::isArgumentNumber(const Identifier& ident, int argumentNumber)
+{
+ RegisterID* registerID = registerFor(ident);
+ // Arguments are assigned negative register indices, so a null register or a
+ // non-negative index means 'ident' does not name an argument.
+ if (!registerID || registerID->index() >= 0)
+ return false;
+ return registerID->index() == CallFrame::argumentOffset(argumentNumber);
}
} // namespace JSC