-/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+/*
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#if ENABLE(DFG_JIT)
+#include "ArrayConstructor.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
-#include "DFGByteCodeCache.h"
+#include "CodeBlockWithJITType.h"
+#include "DFGArrayMode.h"
#include "DFGCapabilities.h"
+#include "DFGJITCode.h"
#include "GetByIdStatus.h"
-#include "MethodCallLinkStatus.h"
+#include "Heap.h"
+#include "JSActivation.h"
+#include "JSCInlines.h"
+#include "PreciseJumpTargets.h"
#include "PutByIdStatus.h"
+#include "StackAlignment.h"
+#include "StringConstructor.h"
+#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC { namespace DFG {
+
+class ConstantBufferKey {
+public:
+ ConstantBufferKey()
+ : m_codeBlock(0)
+ , m_index(0)
+ {
+ }
+
+ ConstantBufferKey(WTF::HashTableDeletedValueType)
+ : m_codeBlock(0)
+ , m_index(1)
+ {
+ }
+
+ ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
+ : m_codeBlock(codeBlock)
+ , m_index(index)
+ {
+ }
+
+ bool operator==(const ConstantBufferKey& other) const
+ {
+ return m_codeBlock == other.m_codeBlock
+ && m_index == other.m_index;
+ }
+
+ unsigned hash() const
+ {
+ return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return !m_codeBlock && m_index;
+ }
+
+ CodeBlock* codeBlock() const { return m_codeBlock; }
+ unsigned index() const { return m_index; }
+
+private:
+ CodeBlock* m_codeBlock;
+ unsigned m_index;
+};
+
+struct ConstantBufferKeyHash {
+ static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
+ static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
+ {
+ return a == b;
+ }
+
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::DFG
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
+ typedef JSC::DFG::ConstantBufferKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
+
+} // namespace WTF
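+// The specializations above let ConstantBufferKey be used directly as a HashMap
+// key; the parser keys its m_constantBufferCache on (CodeBlock*, buffer index)
+// pairs, roughly:
+//
+//     HashMap<ConstantBufferKey, unsigned> cache;
+//     cache.add(ConstantBufferKey(codeBlock, bufferIndex), newBufferIndex);
+//
+// (bufferIndex/newBufferIndex are illustrative names.) The deleted-value
+// encoding (null CodeBlock, index == 1) cannot collide with a live key, on the
+// assumption that live keys always carry a non-null CodeBlock.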
namespace JSC { namespace DFG {
class ByteCodeParser {
public:
ByteCodeParser(Graph& graph)
- : m_globalData(&graph.m_globalData)
+ : m_vm(&graph.m_vm)
, m_codeBlock(graph.m_codeBlock)
, m_profiledBlock(graph.m_profiledBlock)
, m_graph(graph)
, m_currentBlock(0)
, m_currentIndex(0)
- , m_currentProfilingIndex(0)
, m_constantUndefined(UINT_MAX)
, m_constantNull(UINT_MAX)
, m_constantNaN(UINT_MAX)
, m_constants(m_codeBlock->numberOfConstantRegisters())
, m_numArguments(m_codeBlock->numParameters())
, m_numLocals(m_codeBlock->m_numCalleeRegisters)
- , m_preservedVars(m_codeBlock->m_numVars)
, m_parameterSlots(0)
, m_numPassedVarArgs(0)
- , m_globalResolveNumber(0)
, m_inlineStackTop(0)
, m_haveBuiltOperandMaps(false)
, m_emptyJSValueIndex(UINT_MAX)
+ , m_currentInstruction(0)
{
ASSERT(m_profiledBlock);
-
- for (int i = 0; i < m_codeBlock->m_numVars; ++i)
- m_preservedVars.set(i);
}
// Parse a full CodeBlock of bytecode.
bool parse();
private:
+ struct InlineStackEntry;
+
// Just parse from m_currentIndex to the end of the current CodeBlock.
void parseCodeBlock();
+
+ void ensureLocals(unsigned newNumLocals)
+ {
+ if (newNumLocals <= m_numLocals)
+ return;
+ m_numLocals = newNumLocals;
+ for (size_t i = 0; i < m_graph.numBlocks(); ++i)
+ m_graph.block(i)->ensureLocals(newNumLocals);
+ }
// Helper for min and max.
- bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
+ bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
// Handle calls. This resolves issues surrounding inlining and intrinsics.
- void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
- void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
+ void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
+ void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
+ void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
+ void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
// Handle inlining. Return true if it succeeded, false if we need to plant a call.
- bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
- // Handle setting the result of an intrinsic.
- void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
+ bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
// Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
- bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
+ bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
+ bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
+ bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
+ Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
+ Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset);
+ void handleGetByOffset(
+ int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
+ PropertyOffset);
+ void handleGetById(
+ int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
+ const GetByIdStatus&);
+ void emitPutById(
+ Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
+ void handlePutById(
+ Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
+ bool isDirect);
+ Node* emitPrototypeChecks(Structure*, IntendedStructureChain*);
+
+ Node* getScope(bool skipTop, unsigned skipCount);
+
// Prepare to parse a block.
void prepareToParseBlock();
// Parse a single basic block of bytecode instructions.
bool parseBlock(unsigned limit);
- // Find reachable code and setup predecessor links in the graph's BasicBlocks.
- void determineReachability();
- // Enqueue a block onto the worklist, if necessary.
- void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
// Link block successors.
- void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
- void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
- // Link GetLocal & SetLocal nodes, to ensure live values are generated.
- enum PhiStackType {
- LocalPhiStack,
- ArgumentPhiStack
- };
- template<PhiStackType stackType>
- void processPhiStack();
-
- void fixVariableAccessPredictions();
- // Add spill locations to nodes.
- void allocateVirtualRegisters();
+ void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
+ void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
- VariableAccessData* newVariableAccessData(int operand)
+ VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
{
- ASSERT(operand < FirstConstantRegisterIndex);
+ ASSERT(!operand.isConstant());
- m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
+ m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
return &m_graph.m_variableAccessData.last();
}
// Get/Set the operands/result of a bytecode instruction.
- NodeIndex getDirect(int operand)
+ Node* getDirect(VirtualRegister operand)
{
// Is this a constant?
- if (operand >= FirstConstantRegisterIndex) {
- unsigned constant = operand - FirstConstantRegisterIndex;
+ if (operand.isConstant()) {
+ unsigned constant = operand.toConstantIndex();
ASSERT(constant < m_constants.size());
return getJSConstant(constant);
}
// Is this an argument?
- if (operandIsArgument(operand))
+ if (operand.isArgument())
return getArgument(operand);
// Must be a local.
- return getLocal((unsigned)operand);
+ return getLocal(operand);
}
- NodeIndex get(int operand)
+
+ Node* get(VirtualRegister operand)
{
+ if (inlineCallFrame()) {
+ if (!inlineCallFrame()->isClosureCall) {
+ JSFunction* callee = inlineCallFrame()->calleeConstant();
+ if (operand.offset() == JSStack::Callee)
+ return cellConstant(callee);
+ if (operand.offset() == JSStack::ScopeChain)
+ return cellConstant(callee->scope());
+ }
+ } else if (operand.offset() == JSStack::Callee)
+ return addToGraph(GetCallee);
+ else if (operand.offset() == JSStack::ScopeChain)
+ return addToGraph(GetMyScope);
+
return getDirect(m_inlineStackTop->remapOperand(operand));
}
- void setDirect(int operand, NodeIndex value)
+
+ enum SetMode {
+ // A normal set which follows a two-phase commit that spans code origins. During
+ // the current code origin it issues a MovHint, and at the start of the next
+ // code origin there will be a SetLocal. If the local needs flushing, the second
+ // SetLocal will be preceded with a Flush.
+ NormalSet,
+
+ // A set where the SetLocal happens immediately and there is still a Flush. This
+ // is relevant when assigning to a local in tricky situations for the delayed
+ // SetLocal logic but where we know that we have not performed any side effects
+ // within this code origin. This is a safe replacement for NormalSet anytime we
+ // know that we have not yet performed side effects in this code origin.
+ ImmediateSetWithFlush,
+
+ // A set where the SetLocal happens immediately and we do not Flush it even if
+ // this is a local that is marked as needing it. This is relevant when
+ // initializing locals at the top of a function.
+ ImmediateNakedSet
+ };
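+ // A sketch of the NormalSet two-phase commit described above: for a set of
+ // @value into loc42 (illustrative names) at bytecode index N, the parser emits
+ //
+ //     MovHint(@value, loc42)   // immediately, at origin N
+ //     Flush(loc42)             // later, only if the local needs flushing
+ //     SetLocal(@value, loc42)  // at the start of the next code origin
+ //
+ // ImmediateSetWithFlush and ImmediateNakedSet skip the queueing and emit the
+ // SetLocal right away, with and without the Flush respectively.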
+ Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
- // Is this an argument?
- if (operandIsArgument(operand)) {
- setArgument(operand, value);
- return;
+ addToGraph(MovHint, OpInfo(operand.offset()), value);
+
+ DelayedSetLocal delayed = DelayedSetLocal(operand, value);
+
+ if (setMode == NormalSet) {
+ m_setLocalQueue.append(delayed);
+ return 0;
}
-
- // Must be a local.
- setLocal((unsigned)operand, value);
+
+ return delayed.execute(this, setMode);
}
- void set(int operand, NodeIndex value)
+
+ Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
- setDirect(m_inlineStackTop->remapOperand(operand), value);
+ return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
}
- NodeIndex injectLazyOperandPrediction(NodeIndex nodeIndex)
+ Node* injectLazyOperandSpeculation(Node* node)
{
- Node& node = m_graph[nodeIndex];
- ASSERT(node.op() == GetLocal);
- ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
- PredictedType prediction =
- m_inlineStackTop->m_lazyOperands.prediction(
- LazyOperandValueProfileKey(m_currentIndex, node.local()));
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Lazy operand [@%u, bc#%u, r%d] prediction: %s\n",
- nodeIndex, m_currentIndex, node.local(), predictionToString(prediction));
-#endif
- node.variableAccessData()->predict(prediction);
- return nodeIndex;
+ ASSERT(node->op() == GetLocal);
+ ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
+ ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ LazyOperandValueProfileKey key(m_currentIndex, node->local());
+ SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
+ node->variableAccessData()->predict(prediction);
+ return node;
}
// Used in implementing get/set, above, where the operand is a local variable.
- NodeIndex getLocal(unsigned operand)
- {
- NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
-
- if (nodeIndex != NoNode) {
- Node* nodePtr = &m_graph[nodeIndex];
- if (nodePtr->op() == Flush) {
- // Two possibilities: either the block wants the local to be live
- // but has not loaded its value, or it has loaded its value, in
- // which case we're done.
- nodeIndex = nodePtr->child1().index();
- Node& flushChild = m_graph[nodeIndex];
- if (flushChild.op() == Phi) {
- VariableAccessData* variableAccessData = flushChild.variableAccessData();
- nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
- m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
- return nodeIndex;
+ Node* getLocal(VirtualRegister operand)
+ {
+ unsigned local = operand.toLocal();
+
+ if (local < m_localWatchpoints.size()) {
+ if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
+ if (JSValue value = set->inferredValue()) {
+ addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
+ addToGraph(VariableWatchpoint, OpInfo(set));
+ // Note: this is very special from an OSR exit standpoint. We wouldn't be
+ // able to do this for most locals, but it works here because we're dealing
+ // with a flushed local. For most locals we would need to issue a GetLocal
+ // here and ensure that we have uses in DFG IR wherever there would have
+ // been uses in bytecode. Clearly this optimization does not do this. But
+ // that's fine, because we don't need to track liveness for captured
+ // locals, and this optimization only kicks in for captured locals.
+ return inferredConstant(value);
}
- nodePtr = &flushChild;
- }
-
- ASSERT(&m_graph[nodeIndex] == nodePtr);
- ASSERT(nodePtr->op() != Flush);
-
- if (m_graph.localIsCaptured(operand)) {
- // We wish to use the same variable access data as the previous access,
- // but for all other purposes we want to issue a load since for all we
- // know, at this stage of compilation, the local has been clobbered.
-
- // Make sure we link to the Phi node, not to the GetLocal.
- if (nodePtr->op() == GetLocal)
- nodeIndex = nodePtr->child1().index();
-
- return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
}
-
- if (nodePtr->op() == GetLocal)
- return nodeIndex;
- ASSERT(nodePtr->op() == SetLocal);
- return nodePtr->child1().index();
}
- // Check for reads of temporaries from prior blocks,
- // expand m_preservedVars to cover these.
- m_preservedVars.set(operand);
+ Node* node = m_currentBlock->variablesAtTail.local(local);
+ bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ // This has two goals: 1) link together variable access datas, and 2)
+ // try to avoid creating redundant GetLocals. (1) is required for
+ // correctness - no other phase will ensure that block-local variable
+ // access data unification is done correctly. (2) is purely opportunistic
+ // and is meant as a compile-time optimization only.
- NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
- m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
- nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
- m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
+ VariableAccessData* variable;
- m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
+ if (node) {
+ variable = node->variableAccessData();
+ variable->mergeIsCaptured(isCaptured);
+
+ if (!isCaptured) {
+ switch (node->op()) {
+ case GetLocal:
+ return node;
+ case SetLocal:
+ return node->child1().node();
+ default:
+ break;
+ }
+ }
+ } else
+ variable = newVariableAccessData(operand, isCaptured);
- return nodeIndex;
+ node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
+ m_currentBlock->variablesAtTail.local(local) = node;
+ return node;
}
- void setLocal(unsigned operand, NodeIndex value)
+
+ Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
- NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
- m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
+ unsigned local = operand.toLocal();
+ bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
- bool shouldFlush = m_graph.localIsCaptured(operand);
-
- if (!shouldFlush) {
- // If this is in argument position, then it should be flushed.
- for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
- InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
- if (!inlineCallFrame)
- break;
- if (static_cast<int>(operand) >= inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize)
- continue;
- if (static_cast<int>(operand) == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
- continue;
- if (operand < inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize - inlineCallFrame->arguments.size())
- continue;
- int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
- stack->m_argumentPositions[argument]->addVariable(variableAccessData);
- shouldFlush = true;
- break;
- }
+ if (setMode != ImmediateNakedSet) {
+ ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
+ if (isCaptured || argumentPosition)
+ flushDirect(operand, argumentPosition);
}
-
- if (shouldFlush)
- addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
+
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
+ variableAccessData->mergeStructureCheckHoistingFailed(
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
+ variableAccessData->mergeCheckArrayHoistingFailed(
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
+ Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
+ m_currentBlock->variablesAtTail.local(local) = node;
+ return node;
}
// Used in implementing get/set, above, where the operand is an argument.
- NodeIndex getArgument(unsigned operand)
+ Node* getArgument(VirtualRegister operand)
{
- unsigned argument = operandToArgument(operand);
+ unsigned argument = operand.toArgument();
ASSERT(argument < m_numArguments);
- NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);
-
- if (nodeIndex != NoNode) {
- Node* nodePtr = &m_graph[nodeIndex];
- if (nodePtr->op() == Flush) {
- // Two possibilities: either the block wants the local to be live
- // but has not loaded its value, or it has loaded its value, in
- // which case we're done.
- nodeIndex = nodePtr->child1().index();
- Node& flushChild = m_graph[nodeIndex];
- if (flushChild.op() == Phi) {
- VariableAccessData* variableAccessData = flushChild.variableAccessData();
- nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
- m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
- return nodeIndex;
- }
- nodePtr = &flushChild;
- }
-
- ASSERT(&m_graph[nodeIndex] == nodePtr);
- ASSERT(nodePtr->op() != Flush);
-
- if (nodePtr->op() == SetArgument) {
- // We're getting an argument in the first basic block; link
- // the GetLocal to the SetArgument.
- ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
- nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
- m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
- return nodeIndex;
- }
-
- if (m_graph.argumentIsCaptured(argument)) {
- if (nodePtr->op() == GetLocal)
- nodeIndex = nodePtr->child1().index();
- return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
- }
-
- if (nodePtr->op() == GetLocal)
- return nodeIndex;
-
- ASSERT(nodePtr->op() == SetLocal);
- return nodePtr->child1().index();
- }
-
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ Node* node = m_currentBlock->variablesAtTail.argument(argument);
+ bool isCaptured = m_codeBlock->isCaptured(operand);
- NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
- m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
- nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
- m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
+ VariableAccessData* variable;
- m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
+ if (node) {
+ variable = node->variableAccessData();
+ variable->mergeIsCaptured(isCaptured);
+
+ switch (node->op()) {
+ case GetLocal:
+ return node;
+ case SetLocal:
+ return node->child1().node();
+ default:
+ break;
+ }
+ } else
+ variable = newVariableAccessData(operand, isCaptured);
- return nodeIndex;
+ node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
+ m_currentBlock->variablesAtTail.argument(argument) = node;
+ return node;
}
- void setArgument(int operand, NodeIndex value)
+ Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
- unsigned argument = operandToArgument(operand);
+ unsigned argument = operand.toArgument();
ASSERT(argument < m_numArguments);
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ bool isCaptured = m_codeBlock->isCaptured(operand);
+
+ VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
+
+ // Always flush arguments, except for 'this'. If 'this' is created by us,
+ // then make sure that it's never unboxed.
+ if (argument) {
+ if (setMode != ImmediateNakedSet)
+ flushDirect(operand);
+ } else if (m_codeBlock->specializationKind() == CodeForConstruct)
+ variableAccessData->mergeShouldNeverUnbox(true);
+
+ variableAccessData->mergeStructureCheckHoistingFailed(
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
+ variableAccessData->mergeCheckArrayHoistingFailed(
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
+ Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
+ m_currentBlock->variablesAtTail.argument(argument) = node;
+ return node;
+ }
+
+ ArgumentPosition* findArgumentPositionForArgument(int argument)
+ {
InlineStackEntry* stack = m_inlineStackTop;
- while (stack->m_inlineCallFrame) // find the machine stack entry.
+ while (stack->m_inlineCallFrame)
stack = stack->m_caller;
- stack->m_argumentPositions[argument]->addVariable(variableAccessData);
- NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
- m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
- // Always flush arguments.
- addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
+ return stack->m_argumentPositions[argument];
+ }
+
+ ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
+ {
+ for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
+ InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
+ if (!inlineCallFrame)
+ break;
+ if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
+ continue;
+ if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
+ continue;
+ if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
+ continue;
+ int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
+ return stack->m_argumentPositions[argument];
+ }
+ return 0;
+ }
+
+ ArgumentPosition* findArgumentPosition(VirtualRegister operand)
+ {
+ if (operand.isArgument())
+ return findArgumentPositionForArgument(operand.toArgument());
+ return findArgumentPositionForLocal(operand);
+ }
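+ // An ArgumentPosition collects every VariableAccessData that can feed the same
+ // argument slot across the inline stack (flushDirect() below calls
+ // argumentPosition->addVariable()), so speculation about that argument can be
+ // unified no matter which inlined frame wrote it.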
+
+ void addConstant(JSValue value)
+ {
+ unsigned constantIndex = m_codeBlock->addConstantLazily();
+ initializeLazyWriteBarrierForConstant(
+ m_graph.m_plan.writeBarriers,
+ m_codeBlock->constants()[constantIndex],
+ m_codeBlock,
+ constantIndex,
+ m_codeBlock->ownerExecutable(),
+ value);
+ }
+
+ void flush(VirtualRegister operand)
+ {
+ flushDirect(m_inlineStackTop->remapOperand(operand));
+ }
+
+ void flushDirect(VirtualRegister operand)
+ {
+ flushDirect(operand, findArgumentPosition(operand));
}
- VariableAccessData* flushArgument(int operand)
+ void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
{
- // FIXME: This should check if the same operand had already been flushed to
- // some other local variable.
+ bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
- operand = m_inlineStackTop->remapOperand(operand);
+ ASSERT(!operand.isConstant());
- ASSERT(operand < FirstConstantRegisterIndex);
+ Node* node = m_currentBlock->variablesAtTail.operand(operand);
- NodeIndex nodeIndex;
- int index;
- if (operandIsArgument(operand)) {
- index = operandToArgument(operand);
- nodeIndex = m_currentBlock->variablesAtTail.argument(index);
- } else {
- index = operand;
- nodeIndex = m_currentBlock->variablesAtTail.local(index);
- m_preservedVars.set(operand);
- }
+ VariableAccessData* variable;
- if (nodeIndex != NoNode) {
- Node& node = m_graph[nodeIndex];
- switch (node.op()) {
- case Flush:
- nodeIndex = node.child1().index();
- break;
- case GetLocal:
- nodeIndex = node.child1().index();
- break;
- default:
- break;
+ if (node) {
+ variable = node->variableAccessData();
+ variable->mergeIsCaptured(isCaptured);
+ } else
+ variable = newVariableAccessData(operand, isCaptured);
+
+ node = addToGraph(Flush, OpInfo(variable));
+ m_currentBlock->variablesAtTail.operand(operand) = node;
+ if (argumentPosition)
+ argumentPosition->addVariable(variable);
+ }
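+ // Roughly speaking, Flush does not read the value; it marks the variable as
+ // observable (live) at this point so OSR exit and the caller can see it.
+ // Arguments are also tied into their ArgumentPosition here so their
+ // speculation is merged across inlined frames.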
+
+ void flush(InlineStackEntry* inlineStackEntry)
+ {
+ int numArguments;
+ if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
+ numArguments = inlineCallFrame->arguments.size();
+ if (inlineCallFrame->isClosureCall) {
+ flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
+ flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain)));
}
-
- ASSERT(m_graph[nodeIndex].op() != Flush
- && m_graph[nodeIndex].op() != GetLocal);
-
- // Emit a Flush regardless of whether we already flushed it.
- // This gives us guidance to see that the variable also needs to be flushed
- // for arguments, even if it already had to be flushed for other reasons.
- VariableAccessData* variableAccessData = node.variableAccessData();
- addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
- return variableAccessData;
- }
-
- VariableAccessData* variableAccessData = newVariableAccessData(operand);
- NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
- nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
- if (operandIsArgument(operand)) {
- m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
- m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
- m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
- } else {
- m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
- m_currentBlock->variablesAtTail.local(index) = nodeIndex;
- m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
+ } else
+ numArguments = inlineStackEntry->m_codeBlock->numParameters();
+ for (unsigned argument = numArguments; argument-- > 1;)
+ flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
+ for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
+ if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
+ continue;
+ flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
}
- return variableAccessData;
}
- // Get an operand, and perform a ToInt32/ToNumber conversion on it.
- NodeIndex getToInt32(int operand)
+ void flushForTerminal()
{
- return toInt32(get(operand));
+ for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
+ flush(inlineStackEntry);
}
- // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
- NodeIndex toInt32(NodeIndex index)
+ void flushForReturn()
{
- Node& node = m_graph[index];
-
- if (node.hasInt32Result())
- return index;
-
- if (node.op() == UInt32ToNumber)
- return node.child1().index();
-
- // Check for numeric constants boxed as JSValues.
- if (node.op() == JSConstant) {
- JSValue v = valueOfJSConstant(index);
- if (v.isInt32())
- return getJSConstant(node.constantNumber());
- if (v.isNumber())
- return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
+ flush(m_inlineStackTop);
+ }
+
+ void flushIfTerminal(SwitchData& data)
+ {
+ if (data.fallThrough.bytecodeIndex() > m_currentIndex)
+ return;
+
+ for (unsigned i = data.cases.size(); i--;) {
+ if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
+ return;
}
-
- return addToGraph(ValueToInt32, index);
+
+ flushForTerminal();
}
- NodeIndex getJSConstantForValue(JSValue constantValue)
+ // NOTE: Only use this to construct constants that arise from non-speculative
+ // constant folding. I.e. creating constants using this if we had constant
+ // field inference would be a bad idea, since the bytecode parser's folding
+ // doesn't handle liveness preservation.
+ Node* getJSConstantForValue(JSValue constantValue)
{
- unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
- if (constantIndex >= m_constants.size())
+ unsigned constantIndex;
+ if (!m_codeBlock->findConstant(constantValue, constantIndex)) {
+ addConstant(constantValue);
m_constants.append(ConstantRecord());
+ }
ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
return getJSConstant(constantIndex);
}
- NodeIndex getJSConstant(unsigned constant)
+ Node* getJSConstant(unsigned constant)
{
- NodeIndex index = m_constants[constant].asJSValue;
- if (index != NoNode)
- return index;
+ Node* node = m_constants[constant].asJSValue;
+ if (node)
+ return node;
- NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
- m_constants[constant].asJSValue = resultIndex;
- return resultIndex;
+ Node* result = addToGraph(JSConstant, OpInfo(constant));
+ m_constants[constant].asJSValue = result;
+ return result;
}
// Helper functions to get/set the this value.
- NodeIndex getThis()
+ Node* getThis()
{
return get(m_inlineStackTop->m_codeBlock->thisRegister());
}
- void setThis(NodeIndex value)
+
+ void setThis(Node* value)
{
set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
}
// Convenience methods for checking nodes for constants.
- bool isJSConstant(NodeIndex index)
+ bool isJSConstant(Node* node)
{
- return m_graph[index].op() == JSConstant;
+ return node->op() == JSConstant;
}
- bool isInt32Constant(NodeIndex nodeIndex)
+ bool isInt32Constant(Node* node)
{
- return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
+ return isJSConstant(node) && valueOfJSConstant(node).isInt32();
}
// Convenience methods for getting constant values.
- JSValue valueOfJSConstant(NodeIndex index)
+ JSValue valueOfJSConstant(Node* node)
{
- ASSERT(isJSConstant(index));
- return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
+ ASSERT(isJSConstant(node));
+ return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber());
}
- int32_t valueOfInt32Constant(NodeIndex nodeIndex)
+ int32_t valueOfInt32Constant(Node* node)
{
- ASSERT(isInt32Constant(nodeIndex));
- return valueOfJSConstant(nodeIndex).asInt32();
+ ASSERT(isInt32Constant(node));
+ return valueOfJSConstant(node).asInt32();
}
// This method returns a JSConstant with the value 'undefined'.
- NodeIndex constantUndefined()
+ Node* constantUndefined()
{
// Has m_constantUndefined been set up yet?
if (m_constantUndefined == UINT_MAX) {
// Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
ASSERT(m_constants.size() == numberOfConstants);
- m_codeBlock->addConstant(jsUndefined());
+ addConstant(jsUndefined());
m_constants.append(ConstantRecord());
ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
}
}
// This method returns a JSConstant with the value 'null'.
- NodeIndex constantNull()
+ Node* constantNull()
{
// Has m_constantNull been set up yet?
if (m_constantNull == UINT_MAX) {
// Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
ASSERT(m_constants.size() == numberOfConstants);
- m_codeBlock->addConstant(jsNull());
+ addConstant(jsNull());
m_constants.append(ConstantRecord());
ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
}
}
// This method returns a DoubleConstant with the value 1.
- NodeIndex one()
+ Node* one()
{
// Has m_constant1 been set up yet?
if (m_constant1 == UINT_MAX) {
// Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
ASSERT(m_constants.size() == numberOfConstants);
- m_codeBlock->addConstant(jsNumber(1));
+ addConstant(jsNumber(1));
m_constants.append(ConstantRecord());
ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
}
}
// This method returns a DoubleConstant with the value NaN.
- NodeIndex constantNaN()
+ Node* constantNaN()
{
JSValue nan = jsNaN();
// Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
ASSERT(m_constants.size() == numberOfConstants);
- m_codeBlock->addConstant(nan);
+ addConstant(nan);
m_constants.append(ConstantRecord());
ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
}
// m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
- ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
+ ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
return getJSConstant(m_constantNaN);
}
- NodeIndex cellConstant(JSCell* cell)
+ Node* cellConstant(JSCell* cell)
{
- HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
- if (result.isNewEntry)
- result.iterator->second = addToGraph(WeakJSConstant, OpInfo(cell));
+ HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, nullptr);
+ if (result.isNewEntry) {
+ ASSERT(!Heap::isZombified(cell));
+ result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
+ }
- return result.iterator->second;
+ return result.iterator->value;
}
- CodeOrigin currentCodeOrigin()
+ Node* inferredConstant(JSValue value)
+ {
+ if (value.isCell())
+ return cellConstant(value.asCell());
+ return getJSConstantForValue(value);
+ }
+
+ InlineCallFrame* inlineCallFrame()
{
- return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
+ return m_inlineStackTop->m_inlineCallFrame;
}
- // These methods create a node and add it to the graph. If nodes of this type are
- // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
- NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ CodeOrigin currentCodeOrigin()
+ {
+ return CodeOrigin(m_currentIndex, inlineCallFrame());
+ }
+
+ BranchData* branchData(unsigned taken, unsigned notTaken)
+ {
+ // We assume that branches originating from bytecode always have a fall-through. We
+ // use this assumption to avoid checking for the creation of terminal blocks.
+ ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
+ BranchData* data = m_graph.m_branchData.add();
+ *data = BranchData::withBytecodeIndices(taken, notTaken);
+ return data;
+ }
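+ // The taken/notTaken targets are recorded as bytecode indices here; they are
+ // resolved to BasicBlock pointers later, during block linking (see
+ // linkBlock()/linkBlocks() and m_blockLinkingTargets; the linking pass itself
+ // is not part of this excerpt).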
+
+ Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
{
- NodeIndex resultIndex = (NodeIndex)m_graph.size();
- m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
+ Node* result = m_graph.addNode(
+ SpecNone, op, NodeOrigin(currentCodeOrigin()), Edge(child1), Edge(child2),
+ Edge(child3));
ASSERT(op != Phi);
- m_currentBlock->append(resultIndex);
-
- if (defaultFlags(op) & NodeMustGenerate)
- m_graph.ref(resultIndex);
- return resultIndex;
+ m_currentBlock->append(result);
+ return result;
}
- NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
{
- NodeIndex resultIndex = (NodeIndex)m_graph.size();
- m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
- if (op == Phi)
- m_currentBlock->phis.append(resultIndex);
- else
- m_currentBlock->append(resultIndex);
-
- if (defaultFlags(op) & NodeMustGenerate)
- m_graph.ref(resultIndex);
- return resultIndex;
+ Node* result = m_graph.addNode(
+ SpecNone, op, NodeOrigin(currentCodeOrigin()), child1, child2, child3);
+ ASSERT(op != Phi);
+ m_currentBlock->append(result);
+ return result;
}
- NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+ Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
{
- NodeIndex resultIndex = (NodeIndex)m_graph.size();
- m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
+ Node* result = m_graph.addNode(
+ SpecNone, op, NodeOrigin(currentCodeOrigin()), info, Edge(child1), Edge(child2),
+ Edge(child3));
ASSERT(op != Phi);
- m_currentBlock->append(resultIndex);
-
- if (defaultFlags(op) & NodeMustGenerate)
- m_graph.ref(resultIndex);
- return resultIndex;
+ m_currentBlock->append(result);
+ return result;
+ }
+ Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
+ {
+ Node* result = m_graph.addNode(
+ SpecNone, op, NodeOrigin(currentCodeOrigin()), info1, info2,
+ Edge(child1), Edge(child2), Edge(child3));
+ ASSERT(op != Phi);
+ m_currentBlock->append(result);
+ return result;
}
- NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
+ Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
{
- NodeIndex resultIndex = (NodeIndex)m_graph.size();
- m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
+ Node* result = m_graph.addNode(
+ SpecNone, Node::VarArg, op, NodeOrigin(currentCodeOrigin()), info1, info2,
+ m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
ASSERT(op != Phi);
- m_currentBlock->append(resultIndex);
+ m_currentBlock->append(result);
m_numPassedVarArgs = 0;
- if (defaultFlags(op) & NodeMustGenerate)
- m_graph.ref(resultIndex);
- return resultIndex;
- }
-
- NodeIndex insertPhiNode(OpInfo info, BasicBlock* block)
- {
- NodeIndex resultIndex = (NodeIndex)m_graph.size();
- m_graph.append(Node(Phi, currentCodeOrigin(), info));
- block->phis.append(resultIndex);
-
- return resultIndex;
+ return result;
}
- void addVarArgChild(NodeIndex child)
+ void addVarArgChild(Node* child)
{
m_graph.m_varArgChildren.append(Edge(child));
m_numPassedVarArgs++;
}
- NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
+ Node* addCall(int result, NodeType op, int callee, int argCount, int registerOffset)
{
- Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
-
- PredictedType prediction = PredictNone;
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
- m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
- prediction = getPrediction();
- }
+ SpeculatedType prediction = getPrediction();
- addVarArgChild(get(currentInstruction[1].u.operand));
- int argCount = currentInstruction[2].u.operand;
- if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
- m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
+ addVarArgChild(get(VirtualRegister(callee)));
+ size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
+ if (parameterSlots > m_parameterSlots)
+ m_parameterSlots = parameterSlots;
- int registerOffset = currentInstruction[3].u.operand;
int dummyThisArgument = op == Call ? 0 : 1;
for (int i = 0 + dummyThisArgument; i < argCount; ++i)
- addVarArgChild(get(registerOffset + argumentToOperand(i)));
+ addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
- NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
- set(putInstruction[1].u.operand, call);
+ Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
+ set(VirtualRegister(result), call);
return call;
}
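+ // m_parameterSlots tracks the maximum stack space any outgoing call needs:
+ // the callee's frame header minus the CallerFrame/ReturnPC slots the call
+ // itself writes, plus the argument count. For example (with hypothetical
+ // constants), a 6-slot header of which 2 are CallerFrame/ReturnPC and a call
+ // with 3 arguments including 'this' would reserve 6 - 2 + 3 = 7 slots.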
- PredictedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
+ Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
{
- UNUSED_PARAM(nodeIndex);
-
- PredictedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
-#endif
-
- return prediction;
+ Node* objectNode = cellConstant(object);
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
+ return objectNode;
+ }
+
+ Node* cellConstantWithStructureCheck(JSCell* object)
+ {
+ return cellConstantWithStructureCheck(object, object->structure());
+ }
+
+ SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
+ {
+ ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
}
- PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
+ SpeculatedType getPrediction(unsigned bytecodeIndex)
{
- PredictedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
+ SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
- if (prediction == PredictNone) {
+ if (prediction == SpecNone) {
// We have no information about what values this node generates. Give up
// on executing this code, since we're likely to do more damage than good.
addToGraph(ForceOSRExit);
return prediction;
}
- PredictedType getPredictionWithoutOSRExit()
+ SpeculatedType getPredictionWithoutOSRExit()
{
- return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
+ return getPredictionWithoutOSRExit(m_currentIndex);
}
- PredictedType getPrediction()
+ SpeculatedType getPrediction()
{
- return getPrediction(m_graph.size(), m_currentProfilingIndex);
+ return getPrediction(m_currentIndex);
}
-
- NodeIndex makeSafe(NodeIndex nodeIndex)
+
+ ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
+ {
+ ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
+ return ArrayMode::fromObserved(locker, profile, action, false);
+ }
+
+ ArrayMode getArrayMode(ArrayProfile* profile)
+ {
+ return getArrayMode(profile, Array::Read);
+ }
+
+ ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
{
- Node& node = m_graph[nodeIndex];
+ ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
- bool likelyToTakeSlowCase;
- if (!isX86() && node.op() == ArithMod)
- likelyToTakeSlowCase = false;
- else
- likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
+ profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
+
+ bool makeSafe =
+ m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ || profile->outOfBounds(locker);
+
+ ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
- if (!likelyToTakeSlowCase
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
- return nodeIndex;
+ return result;
+ }
+
+ Node* makeSafe(Node* node)
+ {
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+ node->mergeFlags(NodeMayOverflowInDFG);
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ node->mergeFlags(NodeMayNegZeroInDFG);
- switch (m_graph[nodeIndex].op()) {
+ if (!isX86() && node->op() == ArithMod)
+ return node;
+
+ if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
+ return node;
+
+ switch (node->op()) {
case UInt32ToNumber:
case ArithAdd:
case ArithSub:
- case ArithNegate:
case ValueAdd:
case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
- m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
+ node->mergeFlags(NodeMayOverflowInBaseline);
break;
+ case ArithNegate:
+ // Currently we can't tell the difference between a negation overflowing
+ // (i.e. -(1 << 31)) and generating negative zero (i.e. -0). If it took the
+ // slow path then we assume that it did both of those things.
+ node->mergeFlags(NodeMayOverflowInBaseline);
+ node->mergeFlags(NodeMayNegZeroInBaseline);
+ break;
+
case ArithMul:
+ // FIXME: We should detect cases where we only overflowed but never created
+ // negative zero.
+ // https://bugs.webkit.org/show_bug.cgi?id=132470
if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
-#endif
- m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
- } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
-#endif
- m_graph[nodeIndex].mergeFlags(NodeMayNegZero);
- }
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+ node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
+ else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ node->mergeFlags(NodeMayNegZeroInBaseline);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
- return nodeIndex;
+ return node;
}
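+ // In short: OSR-exit sites recorded by earlier DFG compilations set the
+ // ...InDFG flags above unconditionally, while the Baseline JIT's slow-case
+ // counters (plus, for ArithMul, the same exit sites) contribute the
+ // ...InBaseline flags, so later phases can tell where the misbehavior was
+ // actually observed.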
- NodeIndex makeDivSafe(NodeIndex nodeIndex)
+ Node* makeDivSafe(Node* node)
{
- ASSERT(m_graph[nodeIndex].op() == ArithDiv);
+ ASSERT(node->op() == ArithDiv);
+
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+ node->mergeFlags(NodeMayOverflowInDFG);
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ node->mergeFlags(NodeMayNegZeroInDFG);
// The main slow case counter for op_div in the old JIT counts only when
// the operands are not numbers. We don't care about that since we already
// care about when the outcome of the division is not an integer, which
// is what the special fast case counter tells us.
- if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
- return nodeIndex;
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op()), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
-#endif
+ if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
+ return node;
- // FIXME: It might be possible to make this more granular. The DFG certainly can
- // distinguish between negative zero and overflow in its exit profiles.
- m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
+ // FIXME: It might be possible to make this more granular.
+ node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
- return nodeIndex;
- }
-
- bool willNeedFlush(StructureStubInfo& stubInfo)
- {
- PolymorphicAccessStructureList* list;
- int listSize;
- switch (stubInfo.accessType) {
- case access_get_by_id_self_list:
- list = stubInfo.u.getByIdSelfList.structureList;
- listSize = stubInfo.u.getByIdSelfList.listSize;
- break;
- case access_get_by_id_proto_list:
- list = stubInfo.u.getByIdProtoList.structureList;
- listSize = stubInfo.u.getByIdProtoList.listSize;
- break;
- default:
- return false;
- }
- for (int i = 0; i < listSize; ++i) {
- if (!list->list[i].isDirect)
- return true;
- }
- return false;
+ return node;
}
bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
void buildOperandMapsIfNecessary();
- JSGlobalData* m_globalData;
+ VM* m_vm;
CodeBlock* m_codeBlock;
CodeBlock* m_profiledBlock;
Graph& m_graph;
BasicBlock* m_currentBlock;
// The bytecode index of the current instruction being generated.
unsigned m_currentIndex;
- // The bytecode index of the value profile of the current instruction being generated.
- unsigned m_currentProfilingIndex;
// We use these values during code generation, and to avoid the need for
// special handling we make sure they are available as constants in the
unsigned m_constantNaN;
unsigned m_constant1;
HashMap<JSCell*, unsigned> m_cellConstants;
- HashMap<JSCell*, NodeIndex> m_cellConstantNodes;
+ HashMap<JSCell*, Node*> m_cellConstantNodes;
// A constant in the constant pool may be represented by more than one
// node in the graph, depending on the context in which it is being used.
struct ConstantRecord {
ConstantRecord()
- : asInt32(NoNode)
- , asNumeric(NoNode)
- , asJSValue(NoNode)
+ : asInt32(0)
+ , asNumeric(0)
+ , asJSValue(0)
{
}
- NodeIndex asInt32;
- NodeIndex asNumeric;
- NodeIndex asJSValue;
+ Node* asInt32;
+ Node* asNumeric;
+ Node* asJSValue;
};
// Track the index of the node whose result is the current value for every
unsigned m_numArguments;
// The number of locals (vars + temporaries) used in the function.
unsigned m_numLocals;
- // The set of registers we need to preserve across BasicBlock boundaries;
- // typically equal to the set of vars, but we expand this to cover all
- // temporaries that persist across blocks (dues to ?:, &&, ||, etc).
- BitVector m_preservedVars;
// The number of slots (in units of sizeof(Register)) that we need to
- // preallocate for calls emanating from this frame. This includes the
- // size of the CallFrame, only if this is not a leaf function. (I.e.
- // this is 0 if and only if this function is a leaf.)
+ // preallocate for arguments to outgoing calls from this frame. This
+ // number includes the CallFrame slots that we initialize for the callee
+ // (but not the callee-initialized CallerFrame and ReturnPC slots).
+ // This number is 0 if and only if this function is a leaf.
unsigned m_parameterSlots;
// The number of var args passed to the next var arg node.
unsigned m_numPassedVarArgs;
- // The index in the global resolve info.
- unsigned m_globalResolveNumber;
-
- struct PhiStackEntry {
- PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
- : m_block(block)
- , m_phi(phi)
- , m_varNo(varNo)
- {
- }
- BasicBlock* m_block;
- NodeIndex m_phi;
- unsigned m_varNo;
- };
- Vector<PhiStackEntry, 16> m_argumentPhiStack;
- Vector<PhiStackEntry, 16> m_localPhiStack;
+ HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
+
+ Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
struct InlineStackEntry {
ByteCodeParser* m_byteCodeParser;
CodeBlock* m_codeBlock;
CodeBlock* m_profiledBlock;
InlineCallFrame* m_inlineCallFrame;
- VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
// direct, caller).
Vector<unsigned> m_identifierRemap;
Vector<unsigned> m_constantRemap;
+ Vector<unsigned> m_constantBufferRemap;
+ Vector<unsigned> m_switchRemap;
// Blocks introduced by this code block, which need successor linking.
// May include up to one basic block that includes the continuation after
// Potential block linking targets. Must be sorted by bytecodeBegin, and
// cannot have two blocks that have the same bytecodeBegin. For this very
// reason, this is not equivalent to
- Vector<BlockIndex> m_blockLinkingTargets;
+ Vector<BasicBlock*> m_blockLinkingTargets;
// If the callsite's basic block was split into two, then this will be
// the head of the callsite block. It needs its successors linked to the
// m_unlinkedBlocks, but not the other way around: there's no way for
// any blocks in m_unlinkedBlocks to jump back into this block.
- BlockIndex m_callsiteBlockHead;
+ BasicBlock* m_callsiteBlockHead;
// Does the callsite block head need linking? This is typically true
// but will be false for the machine code block's inline stack entry
VirtualRegister m_returnValue;
- // Predictions about variable types collected from the profiled code block,
+ // Speculations about variable types collected from the profiled code block,
// which are based on OSR exit profiles that past DFG compilations of this
// code block had gathered.
LazyOperandValueProfileParser m_lazyOperands;
+ CallLinkInfoMap m_callLinkInfos;
+ StubInfoMap m_stubInfos;
+
// Did we see any returns? We need to handle the (uncommon but necessary)
// case where a procedure that does not return was inlined.
bool m_didReturn;
InlineStackEntry* m_caller;
- InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);
+ InlineStackEntry(
+ ByteCodeParser*,
+ CodeBlock*,
+ CodeBlock* profiledBlock,
+ BasicBlock* callsiteBlockHead,
+ JSFunction* callee, // Null if this is a closure call.
+ VirtualRegister returnValueVR,
+ VirtualRegister inlineCallFrameStart,
+ int argumentCountIncludingThis,
+ CodeSpecializationKind);
~InlineStackEntry()
{
m_byteCodeParser->m_inlineStackTop = m_caller;
}
- int remapOperand(int operand) const
+ VirtualRegister remapOperand(VirtualRegister operand) const
{
if (!m_inlineCallFrame)
return operand;
- if (operand >= FirstConstantRegisterIndex) {
- int result = m_constantRemap[operand - FirstConstantRegisterIndex];
- ASSERT(result >= FirstConstantRegisterIndex);
+ if (operand.isConstant()) {
+ VirtualRegister result = VirtualRegister(m_constantRemap[operand.toConstantIndex()]);
+ ASSERT(result.isConstant());
return result;
}
-
- return operand + m_inlineCallFrame->stackOffset;
+
+ return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
}
};
InlineStackEntry* m_inlineStackTop;
+
+ struct DelayedSetLocal {
+ VirtualRegister m_operand;
+ Node* m_value;
+
+ DelayedSetLocal() { }
+ DelayedSetLocal(VirtualRegister operand, Node* value)
+ : m_operand(operand)
+ , m_value(value)
+ {
+ }
+
+ Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
+ {
+ if (m_operand.isArgument())
+ return parser->setArgument(m_operand, m_value, setMode);
+ return parser->setLocal(m_operand, m_value, setMode);
+ }
+ };
+
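+ // NormalSet stores into this queue (see setDirect() above); the queued
+ // DelayedSetLocals are expected to be executed and the queue cleared when the
+ // parser reaches the next bytecode instruction, which is what gives SetLocal
+ // its "start of the next code origin" placement (the code that drains the
+ // queue is elsewhere in the parser).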
+ Vector<DelayedSetLocal, 2> m_setLocalQueue;
// Have we built operand maps? We initialize them lazily, and only when doing
// inlining.
bool m_haveBuiltOperandMaps;
// Mapping between identifier names and numbers.
- IdentifierMap m_identifierMap;
+ BorrowedIdentifierMap m_identifierMap;
// Mapping between values and constant numbers.
JSValueMap m_jsValueMap;
// Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
// work-around for the fact that JSValueMap can't handle "empty" values.
unsigned m_emptyJSValueIndex;
- // Cache of code blocks that we've generated bytecode for.
- ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
+ CodeBlock* m_dfgCodeBlock;
+ CallLinkStatus::ContextMap m_callContextMap;
+ StubInfoMap m_dfgStubInfos;
+
+ Instruction* m_currentInstruction;
};
#define NEXT_OPCODE(name) \
m_currentIndex += OPCODE_LENGTH(name); \
return shouldContinueParsing
-
-void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
+void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
{
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ handleCall(
+ pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
+ pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
+}
+
+void ByteCodeParser::handleCall(
+ int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
+ int callee, int argumentCountIncludingThis, int registerOffset)
+{
+ ASSERT(registerOffset <= 0);
- NodeIndex callTarget = get(currentInstruction[1].u.operand);
- enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
-
- CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_currentIndex);
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("For call at @%lu bc#%u: ", m_graph.size(), m_currentIndex);
- if (callLinkStatus.isSet()) {
- if (callLinkStatus.couldTakeSlowPath())
- dataLog("could take slow path, ");
- dataLog("target = %p\n", callLinkStatus.callTarget());
- } else
- dataLog("not set.\n");
-#endif
+ Node* callTarget = get(VirtualRegister(callee));
- if (m_graph.isFunctionConstant(callTarget))
- callType = ConstantFunction;
- else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
- callType = LinkedFunction;
- else
- callType = UnknownFunction;
- if (callType != UnknownFunction) {
- int argumentCountIncludingThis = currentInstruction[2].u.operand;
- int registerOffset = currentInstruction[3].u.operand;
-
- // Do we have a result?
- bool usesResult = false;
- int resultOperand = 0; // make compiler happy
- unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
- Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
- PredictedType prediction = PredictNone;
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
- resultOperand = putInstruction[1].u.operand;
- usesResult = true;
- m_currentProfilingIndex = nextOffset;
- prediction = getPrediction();
- nextOffset += OPCODE_LENGTH(op_call_put_result);
- }
- JSFunction* expectedFunction;
- Intrinsic intrinsic;
- bool certainAboutExpectedFunction;
- if (callType == ConstantFunction) {
- expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
- intrinsic = expectedFunction->executable()->intrinsicFor(kind);
- certainAboutExpectedFunction = true;
- } else {
- ASSERT(callType == LinkedFunction);
- expectedFunction = callLinkStatus.callTarget();
- intrinsic = expectedFunction->executable()->intrinsicFor(kind);
- certainAboutExpectedFunction = false;
- }
-
- if (intrinsic != NoIntrinsic) {
- if (!certainAboutExpectedFunction)
- emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);
-
- if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
- if (!certainAboutExpectedFunction) {
- // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
- // to, since at this point we know that the call target is a constant. It's just that OSR isn't
- // smart enough to figure that out, since it doesn't understand CheckFunction.
- addToGraph(Phantom, callTarget);
- }
-
- return;
- }
- } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
+ CallLinkStatus callLinkStatus;
+
+ if (m_graph.isConstant(callTarget)) {
+ callLinkStatus = CallLinkStatus(
+ m_graph.valueOfJSConstant(callTarget)).setIsProved(true);
+ } else {
+ callLinkStatus = CallLinkStatus::computeFor(
+ m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
+ m_inlineStackTop->m_callLinkInfos, m_callContextMap);
+ }
+
+ if (!callLinkStatus.canOptimize()) {
+ // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
+ // that we cannot optimize them.
+
+ addCall(result, op, callee, argumentCountIncludingThis, registerOffset);
+ return;
+ }
+
+ unsigned nextOffset = m_currentIndex + instructionSize;
+ SpeculatedType prediction = getPrediction();
+
+ if (InternalFunction* function = callLinkStatus.internalFunction()) {
+ if (handleConstantInternalFunction(result, function, registerOffset, argumentCountIncludingThis, prediction, kind)) {
+ // This phantoming has to be *after* the code for the intrinsic, to signify that
+ // the inputs must be kept alive whatever exits the intrinsic may do.
+ addToGraph(Phantom, callTarget);
+ emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
return;
+ }
+
+ // Can only handle this using the generic call handler.
+ addCall(result, op, callee, argumentCountIncludingThis, registerOffset);
+ return;
}
+
+ Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind);
+ if (intrinsic != NoIntrinsic) {
+ emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind);
- addCall(interpreter, currentInstruction, op);
+ if (handleIntrinsic(result, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
+ // This phantoming has to be *after* the code for the intrinsic, to signify that
+ // the inputs must be kept alive whatever exits the intrinsic may do.
+ addToGraph(Phantom, callTarget);
+ emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedCall();
+ return;
+ }
+ } else if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedCall();
+ return;
+ }
+
+ addCall(result, op, callee, argumentCountIncludingThis, registerOffset);
}
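// A minimal sketch of the decision cascade handleCall() implements above, with the
// JSC types replaced by hypothetical placeholders. It only illustrates the ordering
// (profile check, constant internal function, intrinsic, inlining, then the generic
// call), not the real APIs; each specialized path falls back to the generic call when
// its handler declines.
//
//   enum class CallPlan { GenericCall, InternalFunction, Intrinsic, Inline };
//
//   CallPlan planCall(bool canOptimize, bool isInternalFunction, bool hasIntrinsic, bool canInline)
//   {
//       if (!canOptimize)
//           return CallPlan::GenericCall;      // unexecuted or too-polymorphic call site
//       if (isInternalFunction)
//           return CallPlan::InternalFunction; // e.g. Array, String, typed array constructors
//       if (hasIntrinsic)
//           return CallPlan::Intrinsic;        // e.g. Math.abs, charCodeAt
//       if (canInline)
//           return CallPlan::Inline;
//       return CallPlan::GenericCall;
//   }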
-void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
+void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
{
- NodeIndex thisArgument;
+ Node* thisArgument;
if (kind == CodeForCall)
- thisArgument = get(registerOffset + argumentToOperand(0));
+ thisArgument = get(virtualRegisterForArgument(0, registerOffset));
else
- thisArgument = NoNode;
- addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
+ thisArgument = 0;
+
+ if (callLinkStatus.isProved()) {
+ addToGraph(Phantom, callTarget, thisArgument);
+ return;
+ }
+
+ ASSERT(callLinkStatus.canOptimize());
+
+ if (JSFunction* function = callLinkStatus.function())
+ addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument);
+ else {
+ ASSERT(callLinkStatus.structure());
+ ASSERT(callLinkStatus.executable());
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
+ addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
+ }
+}
+
+void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
+{
+ for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
+ addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
}
-bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
+bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
+ static const bool verbose = false;
+
+ if (verbose)
+ dataLog("Considering inlining ", callLinkStatus, " into ", currentCodeOrigin(), "\n");
+
// First, the really simple checks: do we have an actual JS function?
- if (!expectedFunction)
+ if (!callLinkStatus.executable()) {
+ if (verbose)
+ dataLog(" Failing because there is no executable.\n");
return false;
- if (expectedFunction->isHostFunction())
+ }
+ if (callLinkStatus.executable()->isHostFunction()) {
+ if (verbose)
+ dataLog(" Failing because it's a host function.\n");
+ return false;
+ }
+
+ FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
+
+ // Does the number of arguments we're passing match the arity of the target? We currently
+ // inline only if the number of arguments passed is greater than or equal to the number
+ // of arguments expected.
+ if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
+ if (verbose)
+ dataLog(" Failing because of arity mismatch.\n");
return false;
+ }
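// A standalone sketch of the arity rule above (hypothetical names, plain ints instead
// of the real FunctionExecutable API): inlining is allowed only when the call site
// passes at least as many values (including 'this') as the callee declares; extra
// arguments are fine, too few are not.
//
//   static bool arityPermitsInlining(int calleeParameterCount, int argumentCountIncludingThis)
//   {
//       return calleeParameterCount + 1 <= argumentCountIncludingThis;
//   }
//
//   // e.g. function f(a, b) {} called as f(1, 2, 3): 2 + 1 <= 4, so arity permits inlining;
//   // called as f(1): 2 + 1 <= 2 fails, so the generic call path handles arity fixup.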
- FunctionExecutable* executable = expectedFunction->jsExecutable();
+ // Do we have a code block, and does the code block's size match the heuristics/requirements for
+ // being an inline candidate? We might not have a code block if code was thrown away or if we
+ // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
+ // if we had a static proof of what was being called; this might happen for example if you call a
+ // global function, where watchpointing gives us static information. Overall, it's a rare case
+ // because we expect that any hot callees would have already been compiled.
+ CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
+ if (!codeBlock) {
+ if (verbose)
+ dataLog(" Failing because no code block available.\n");
+ return false;
+ }
+ CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
+ codeBlock, kind, callLinkStatus.isClosureCall());
+ if (!canInline(capabilityLevel)) {
+ if (verbose)
+ dataLog(" Failing because the function is not inlineable.\n");
+ return false;
+ }
- // Does the number of arguments we're passing match the arity of the target? We could
- // inline arity check failures, but for simplicity we currently don't.
- if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
+ // Check if the caller is already too large. We do this check here because that's just
+ // where we happen to also have the callee's code block, and we want that for the
+ // purpose of unsetting SABI.
+ if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
+ codeBlock->m_shouldAlwaysBeInlined = false;
+ if (verbose)
+ dataLog(" Failing because the caller is too large.\n");
return false;
+ }
+
+ // FIXME: this should be better at predicting how much bloat we will introduce by inlining
+ // this function.
+ // https://bugs.webkit.org/show_bug.cgi?id=127627
+
+ // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
+ // too many levels? If either of these are detected, then don't inline. We adjust our
+ // heuristics if we are dealing with a function that cannot otherwise be compiled.
- // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
- // If either of these are detected, then don't inline.
unsigned depth = 0;
+ unsigned recursion = 0;
+
for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
++depth;
- if (depth >= Options::maximumInliningDepth)
- return false; // Depth exceeded.
+ if (depth >= Options::maximumInliningDepth()) {
+ if (verbose)
+ dataLog(" Failing because depth exceeded.\n");
+ return false;
+ }
- if (entry->executable() == executable)
- return false; // Recursion detected.
+ if (entry->executable() == executable) {
+ ++recursion;
+ if (recursion >= Options::maximumInliningRecursion()) {
+ if (verbose)
+ dataLog(" Failing because recursion detected.\n");
+ return false;
+ }
+ }
}
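// A compact sketch of the two limits applied in the walk above (hypothetical helper;
// the real bounds come from Options::maximumInliningDepth() and
// Options::maximumInliningRecursion()): total inline depth is bounded, and a callee
// that already appears on the inline stack may only recur a small number of times.
//
//   static bool inlineStackPermits(int depthSoFar, int recursiveOccurrences, int maxDepth, int maxRecursion)
//   {
//       return depthSoFar < maxDepth && recursiveOccurrences < maxRecursion;
//   }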
- // Does the code block's size match the heuristics/requirements for being
- // an inline candidate?
- CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
- if (!mightInlineFunctionFor(profiledBlock, kind))
- return false;
-
- // If we get here then it looks like we should definitely inline this code. Proceed
- // with parsing the code to get bytecode, so that we can then parse the bytecode.
- // Note that if LLInt is enabled, the bytecode will always be available. Also note
- // that if LLInt is enabled, we may inline a code block that has never been JITted
- // before!
- CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
- if (!codeBlock)
- return false;
-
- ASSERT(canInlineFunctionFor(codeBlock, kind));
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Inlining executable %p.\n", executable);
-#endif
+ if (verbose)
+ dataLog(" Committing to inlining.\n");
// Now we know without a doubt that we are committed to inlining. So begin the process
// by checking the callee (if necessary) and making sure that arguments and the callee
// are flushed.
- if (!certainAboutExpectedFunction)
- emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);
+ emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind);
// FIXME: Don't flush constants!
- Vector<VariableAccessData*, 8> arguments;
- for (int i = 1; i < argumentCountIncludingThis; ++i)
- arguments.append(flushArgument(registerOffset + argumentToOperand(i)));
-
- int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
+ int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
- // Make sure that the area used by the call frame is reserved.
- for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
- m_preservedVars.set(arg);
+ ensureLocals(
+ VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
+ JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
- // Make sure that we have enough locals.
- unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
- if (newNumLocals > m_numLocals) {
- m_numLocals = newNumLocals;
- for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
- m_graph.m_blocks[i]->ensureLocals(newNumLocals);
- }
+ size_t argumentPositionStart = m_graph.m_argumentPositions.size();
- InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);
-
- // Link up the argument variable access datas to their argument positions.
- for (int i = 1; i < argumentCountIncludingThis; ++i)
- inlineStackEntry.m_argumentPositions[i]->addVariable(arguments[i - 1]);
+ InlineStackEntry inlineStackEntry(
+ this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(),
+ m_inlineStackTop->remapOperand(VirtualRegister(resultOperand)),
+ (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
// This is where the actual inlining really happens.
unsigned oldIndex = m_currentIndex;
- unsigned oldProfilingIndex = m_currentProfilingIndex;
m_currentIndex = 0;
- m_currentProfilingIndex = 0;
- addToGraph(InlineStart);
+ InlineVariableData inlineVariableData;
+ inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
+ inlineVariableData.argumentPositionStart = argumentPositionStart;
+ inlineVariableData.calleeVariable = 0;
+
+ RELEASE_ASSERT(
+ m_inlineStackTop->m_inlineCallFrame->isClosureCall
+ == callLinkStatus.isClosureCall());
+ if (callLinkStatus.isClosureCall()) {
+ VariableAccessData* calleeVariable =
+ set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
+ VariableAccessData* scopeVariable =
+ set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateNakedSet)->variableAccessData();
+
+ calleeVariable->mergeShouldNeverUnbox(true);
+ scopeVariable->mergeShouldNeverUnbox(true);
+
+ inlineVariableData.calleeVariable = calleeVariable;
+ }
+
+ m_graph.m_inlineVariableData.append(inlineVariableData);
parseCodeBlock();
m_currentIndex = oldIndex;
- m_currentProfilingIndex = oldProfilingIndex;
// If the inlined code created some new basic blocks, then we have linking to do.
- if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
+ if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
- linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
+ linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
else
- ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
+ ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
// It's possible that the callsite block head is not owned by the caller.
if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
// It's definitely owned by the caller, because the caller created new blocks.
// Assert that this all adds up.
- ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
+ ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead);
ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
} else {
} else
ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
+ BasicBlock* lastBlock = m_graph.lastBlock();
// If there was a return, but no early returns, then we're done. We allow parsing of
// the caller to continue in whatever basic block we're in right now.
if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
- BasicBlock* lastBlock = m_graph.m_blocks.last().get();
- ASSERT(lastBlock->isEmpty() || !m_graph.last().isTerminal());
+ ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
// If we created new blocks then the last block needs linking, but in the
// caller. It doesn't need to be linked to, but it needs outgoing links.
if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
-#endif
// For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
// for release builds because this block will never serve as a potential target
// in the linker's binary search.
lastBlock->bytecodeBegin = m_currentIndex;
- m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
+ m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
}
- m_currentBlock = m_graph.m_blocks.last().get();
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
-#endif
+ m_currentBlock = m_graph.lastBlock();
return true;
}
// If we get to this point then all blocks must end in some sort of terminals.
- ASSERT(m_graph.last().isTerminal());
+ ASSERT(lastBlock->last()->isTerminal());
+
+ // Need to create a new basic block for the continuation at the caller.
+ RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
+
// Link the early returns to the basic block we're about to create.
for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
continue;
- BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
- ASSERT(!block->isLinked);
- Node& node = m_graph[block->last()];
- ASSERT(node.op() == Jump);
- ASSERT(node.takenBlockIndex() == NoBlock);
- node.setTakenBlockIndex(m_graph.m_blocks.size());
+ BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
+ ASSERT(!blockToLink->isLinked);
+ Node* node = blockToLink->last();
+ ASSERT(node->op() == Jump);
+ ASSERT(!node->targetBlock());
+ node->targetBlock() = block.get();
inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
#if !ASSERT_DISABLED
- block->isLinked = true;
+ blockToLink->isLinked = true;
#endif
}
- // Need to create a new basic block for the continuation at the caller.
- OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
-#endif
m_currentBlock = block.get();
- ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
- m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
- m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
- m_graph.m_blocks.append(block.release());
+ ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
+ m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
+ m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
+ m_graph.appendBlock(block);
prepareToParseBlock();
// At this point we return and continue to generate code for the caller, but
// in the new basic block.
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Done inlining executable %p, continuing code generation in new block.\n", executable);
-#endif
return true;
}
-void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex nodeIndex)
-{
- if (!usesResult)
- return;
- set(resultOperand, nodeIndex);
-}
-
-bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
+bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
{
if (argumentCountIncludingThis == 1) { // Math.min()
- setIntrinsicResult(usesResult, resultOperand, constantNaN());
+ set(VirtualRegister(resultOperand), constantNaN());
return true;
}
if (argumentCountIncludingThis == 2) { // Math.min(x)
- // FIXME: what we'd really like is a ValueToNumber, except we don't support that right now. Oh well.
- NodeIndex result = get(registerOffset + argumentToOperand(1));
- addToGraph(CheckNumber, result);
- setIntrinsicResult(usesResult, resultOperand, result);
+ Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
+ addToGraph(Phantom, Edge(result, NumberUse));
+ set(VirtualRegister(resultOperand), result);
return true;
}
if (argumentCountIncludingThis == 3) { // Math.min(x, y)
- setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
+ set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
return true;
}
return false;
}
-// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
-// they need to perform the ToNumber conversion, which can have side-effects.
-bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction)
+bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
{
switch (intrinsic) {
case AbsIntrinsic: {
if (argumentCountIncludingThis == 1) { // Math.abs()
- setIntrinsicResult(usesResult, resultOperand, constantNaN());
+ set(VirtualRegister(resultOperand), constantNaN());
return true;
}
if (!MacroAssembler::supportsFloatingPointAbs())
return false;
- NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
+ Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
- setIntrinsicResult(usesResult, resultOperand, nodeIndex);
+ node->mergeFlags(NodeMayOverflowInDFG);
+ set(VirtualRegister(resultOperand), node);
return true;
}
case MinIntrinsic:
- return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
+ return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
case MaxIntrinsic:
- return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
+ return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
- case SqrtIntrinsic: {
- if (argumentCountIncludingThis == 1) { // Math.sqrt()
- setIntrinsicResult(usesResult, resultOperand, constantNaN());
+ case SqrtIntrinsic:
+ case CosIntrinsic:
+ case SinIntrinsic: {
+ if (argumentCountIncludingThis == 1) {
+ set(VirtualRegister(resultOperand), constantNaN());
return true;
}
- if (!MacroAssembler::supportsFloatingPointSqrt())
+ switch (intrinsic) {
+ case SqrtIntrinsic:
+ if (!MacroAssembler::supportsFloatingPointSqrt())
+ return false;
+
+ set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+
+ case CosIntrinsic:
+ set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+
+ case SinIntrinsic:
+ set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
return false;
-
- setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
- return true;
+ }
}
case ArrayPushIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
- NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
- if (usesResult)
- set(resultOperand, arrayPush);
-
- return true;
+ ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
+ if (!arrayMode.isJSArray())
+ return false;
+ switch (arrayMode.type()) {
+ case Array::Undecided:
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ case Array::ArrayStorage: {
+ Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+ set(VirtualRegister(resultOperand), arrayPush);
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
}
case ArrayPopIntrinsic: {
if (argumentCountIncludingThis != 1)
return false;
- NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
- if (usesResult)
- set(resultOperand, arrayPop);
- return true;
+ ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
+ if (!arrayMode.isJSArray())
+ return false;
+ switch (arrayMode.type()) {
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ case Array::ArrayStorage: {
+ Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
+ set(VirtualRegister(resultOperand), arrayPop);
+ return true;
+ }
+
+ default:
+ return false;
+ }
}
case CharCodeAtIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
- int thisOperand = registerOffset + argumentToOperand(0);
- if (!(m_graph[get(thisOperand)].prediction() & PredictString))
- return false;
-
- int indexOperand = registerOffset + argumentToOperand(1);
- NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
- NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);
+ VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
+ VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+ Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
- if (usesResult)
- set(resultOperand, charCode);
+ set(VirtualRegister(resultOperand), charCode);
return true;
}
case CharAtIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
- int thisOperand = registerOffset + argumentToOperand(0);
- if (!(m_graph[get(thisOperand)].prediction() & PredictString))
+ VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
+ VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+ Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
+
+ set(VirtualRegister(resultOperand), charCode);
+ return true;
+ }
+ case FromCharCodeIntrinsic: {
+ if (argumentCountIncludingThis != 2)
return false;
- int indexOperand = registerOffset + argumentToOperand(1);
- NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
- NodeIndex charCode = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);
+ VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+ Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
+
+ set(VirtualRegister(resultOperand), charCode);
- if (usesResult)
- set(resultOperand, charCode);
return true;
}
case RegExpExecIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
- NodeIndex regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
- if (usesResult)
- set(resultOperand, regExpExec);
+ Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+ set(VirtualRegister(resultOperand), regExpExec);
return true;
}
case RegExpTestIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
- NodeIndex regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
- if (usesResult)
- set(resultOperand, regExpExec);
+ Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+ set(VirtualRegister(resultOperand), regExpExec);
return true;
}
+
+ case IMulIntrinsic: {
+ if (argumentCountIncludingThis != 3)
+ return false;
+ VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
+ VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
+ Node* left = get(leftOperand);
+ Node* right = get(rightOperand);
+ set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
+ return true;
+ }
+
+ case FRoundIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+ VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
+ set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
+ return true;
+ }
+
+ case DFGTrueIntrinsic: {
+ set(VirtualRegister(resultOperand), getJSConstantForValue(jsBoolean(true)));
+ return true;
+ }
+
+ case OSRExitIntrinsic: {
+ addToGraph(ForceOSRExit);
+ set(VirtualRegister(resultOperand), constantUndefined());
+ return true;
+ }
+
+ case IsFinalTierIntrinsic: {
+ set(VirtualRegister(resultOperand),
+ getJSConstantForValue(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
+ return true;
+ }
+
+ case SetInt32HeapPredictionIntrinsic: {
+ for (int i = 1; i < argumentCountIncludingThis; ++i) {
+ Node* node = get(virtualRegisterForArgument(i, registerOffset));
+ if (node->hasHeapPrediction())
+ node->setHeapPrediction(SpecInt32);
+ }
+ set(VirtualRegister(resultOperand), constantUndefined());
+ return true;
+ }
+
+ case FiatInt52Intrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+ VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
+ if (enableInt52())
+ set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
+ else
+ set(VirtualRegister(resultOperand), get(operand));
+ return true;
+ }
default:
return false;
}
}
+bool ByteCodeParser::handleTypedArrayConstructor(
+ int resultOperand, InternalFunction* function, int registerOffset,
+ int argumentCountIncludingThis, TypedArrayType type)
+{
+ if (!isTypedView(type))
+ return false;
+
+ if (function->classInfo() != constructorClassInfoForType(type))
+ return false;
+
+ if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
+ return false;
+
+ // We only have an intrinsic for the case where you say:
+ //
+ // new FooArray(blah);
+ //
+ // Of course, 'blah' could be any of the following:
+ //
+ // - Integer, indicating that you want to allocate an array of that length.
+ // This is the thing we're hoping for, and what we can actually do meaningful
+ // optimizations for.
+ //
+ // - Array buffer, indicating that you want to create a view onto that _entire_
+ // buffer.
+ //
+ // - Non-buffer object, indicating that you want to create a copy of that
+ // object by pretending that it quacks like an array.
+ //
+ // - Anything else, indicating that you want to have an exception thrown at
+ // you.
+ //
+ // The intrinsic, NewTypedArray, will behave as if it could do any of these
+ // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
+ // predicted Int32, then we lock it in as a normal typed array allocation.
+ // Otherwise, NewTypedArray turns into a totally opaque function call that
+ // may clobber the world - by virtue of it accessing properties on what could
+ // be an object.
+ //
+ // Note that although the generic form of NewTypedArray sounds sort of awful,
+ // it is actually quite likely to be more efficient than a fully generic
+ // Construct. So, we might want to think about making NewTypedArray variadic,
+ // or else making Construct not super slow.
+
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ set(VirtualRegister(resultOperand),
+ addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+}
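// A sketch of the Fixup-time decision described in the comment above (hypothetical
// names, not the real FixupPhase code): only an Int32-predicted length argument keeps
// NewTypedArray as a genuine sized allocation; any other argument degrades it to an
// opaque, potentially world-clobbering construct-like call.
//
//   enum class TypedArrayLowering { SizedAllocation, OpaqueCall };
//
//   TypedArrayLowering lowerNewTypedArray(bool argumentPredictedInt32)
//   {
//       return argumentPredictedInt32 ? TypedArrayLowering::SizedAllocation
//                                     : TypedArrayLowering::OpaqueCall;
//   }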
+
+bool ByteCodeParser::handleConstantInternalFunction(
+ int resultOperand, InternalFunction* function, int registerOffset,
+ int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
+{
+ // If we ever find that we have a lot of internal functions that we specialize for,
+ // then we should probably have some sort of hashtable dispatch, or maybe even
+ // dispatch straight through the MethodTable of the InternalFunction. But for now,
+ // it seems that this case is hit infrequently enough, and the number of functions
+ // we know about is small enough, that having just a linear cascade of if statements
+ // is good enough.
+
+ UNUSED_PARAM(prediction); // Remove this once we do more things.
+
+ if (function->classInfo() == ArrayConstructor::info()) {
+ if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
+ return false;
+
+ if (argumentCountIncludingThis == 2) {
+ set(VirtualRegister(resultOperand),
+ addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+ }
+
+ for (int i = 1; i < argumentCountIncludingThis; ++i)
+ addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
+ set(VirtualRegister(resultOperand),
+ addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
+ return true;
+ }
+
+ if (function->classInfo() == StringConstructor::info()) {
+ Node* result;
+
+ if (argumentCountIncludingThis <= 1)
+ result = cellConstant(m_vm->smallStrings.emptyString());
+ else
+ result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
+
+ if (kind == CodeForConstruct)
+ result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
+
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
+
+ for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
+ bool result = handleTypedArrayConstructor(
+ resultOperand, function, registerOffset, argumentCountIncludingThis,
+ indexToTypedArrayType(typeIndex));
+ if (result)
+ return true;
+ }
+
+ return false;
+}
+
+Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
+{
+ Node* propertyStorage;
+ if (isInlineOffset(offset))
+ propertyStorage = base;
+ else
+ propertyStorage = addToGraph(GetButterfly, base);
+ Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+
+ return getByOffset;
+}
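// The storage split above follows the JSC object layout: a few properties live inline
// in the object cell itself, while the rest live in the out-of-line "butterfly"
// reached through GetButterfly. A rough sketch of the load GetByOffset ends up
// performing (hypothetical layout, not the real JSObject definition):
//
//   struct SketchObject {
//       void* structure;
//       uint64_t* butterfly;        // out-of-line property storage
//       uint64_t inlineStorage[6];  // inline property slots (count varies by structure)
//   };
//
//   uint64_t loadProperty(const SketchObject* object, bool isInlineSlot, size_t slot)
//   {
//       return isInlineSlot ? object->inlineStorage[slot] : object->butterfly[slot];
//   }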
+
+void ByteCodeParser::handleGetByOffset(
+ int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
+ PropertyOffset offset)
+{
+ set(VirtualRegister(destinationOperand), handleGetByOffset(prediction, base, identifierNumber, offset));
+}
+
+Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
+{
+ Node* propertyStorage;
+ if (isInlineOffset(offset))
+ propertyStorage = base;
+ else
+ propertyStorage = addToGraph(GetButterfly, base);
+ Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifier;
+ m_graph.m_storageAccessData.append(storageAccessData);
+
+ return result;
+}
+
+Node* ByteCodeParser::emitPrototypeChecks(
+ Structure* structure, IntendedStructureChain* chain)
+{
+ Node* base = 0;
+ m_graph.chains().addLazily(chain);
+ Structure* currentStructure = structure;
+ JSObject* currentObject = 0;
+ for (unsigned i = 0; i < chain->size(); ++i) {
+ currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
+ currentStructure = chain->at(i);
+ base = cellConstantWithStructureCheck(currentObject, currentStructure);
+ }
+ return base;
+}
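// A shape-only sketch of the walk above (hypothetical types; the real code uses
// Structure*, IntendedStructureChain and cellConstantWithStructureCheck): each
// prototype between the receiver and the property holder is pinned as a constant and
// structure-checked, and the last object checked becomes the new base to load from.
//
//   struct ProtoLink { const void* expectedStructure; const void* object; };
//
//   const void* walkPrototypeChain(const ProtoLink* chain, size_t length)
//   {
//       const void* base = nullptr;
//       for (size_t i = 0; i < length; ++i) {
//           // In the real parser this emits a weak constant for chain[i].object plus a
//           // check that it still has chain[i].expectedStructure.
//           base = chain[i].object;
//       }
//       return base;
//   }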
+
+void ByteCodeParser::handleGetById(
+ int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
+ const GetByIdStatus& getByIdStatus)
+{
+ if (!getByIdStatus.isSimple() || !Options::enableAccessInlining()) {
+ set(VirtualRegister(destinationOperand),
+ addToGraph(
+ getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
+ OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
+ if (getByIdStatus.numVariants() > 1) {
+ if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicAccessInlining()) {
+ set(VirtualRegister(destinationOperand),
+ addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedGetById();
+
+ // 1) Emit prototype structure checks for all chains. This may not be optimal
+ // if there is some rarely executed case in the chain that requires a lot
+ // of checks and those checks are not watchpointable.
+ for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;) {
+ if (getByIdStatus[variantIndex].chain()) {
+ emitPrototypeChecks(
+ getByIdStatus[variantIndex].structureSet().singletonStructure(),
+ getByIdStatus[variantIndex].chain());
+ }
+ }
+
+ // 2) Emit a MultiGetByOffset
+ MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
+ data->variants = getByIdStatus.variants();
+ data->identifierNumber = identifierNumber;
+ set(VirtualRegister(destinationOperand),
+ addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
+ return;
+ }
+
+ ASSERT(getByIdStatus.numVariants() == 1);
+ GetByIdVariant variant = getByIdStatus[0];
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedGetById();
+
+ Node* originalBaseForBaselineJIT = base;
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
+
+ if (variant.chain()) {
+ base = emitPrototypeChecks(
+ variant.structureSet().singletonStructure(), variant.chain());
+ }
+
+ // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
+ // ensure that the base of the original get_by_id is kept alive until we're done with
+ // all of the speculations. We only insert the Phantom if there had been a CheckStructure
+ // on something other than the base following the CheckStructure on base, or if the
+ // access was compiled to a WeakJSConstant specific value, in which case we might not
+ // have any explicit use of the base at all.
+ if (variant.specificValue() || originalBaseForBaselineJIT != base)
+ addToGraph(Phantom, originalBaseForBaselineJIT);
+
+ if (variant.specificValue()) {
+ ASSERT(variant.specificValue().isCell());
+
+ set(VirtualRegister(destinationOperand), cellConstant(variant.specificValue().asCell()));
+ return;
+ }
+
+ handleGetByOffset(
+ destinationOperand, prediction, base, identifierNumber, variant.offset());
+}
+
+void ByteCodeParser::emitPutById(
+ Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
+{
+ if (isDirect)
+ addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
+ else
+ addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
+}
+
+void ByteCodeParser::handlePutById(
+ Node* base, unsigned identifierNumber, Node* value,
+ const PutByIdStatus& putByIdStatus, bool isDirect)
+{
+ if (!putByIdStatus.isSimple() || !Options::enableAccessInlining()) {
+ if (!putByIdStatus.isSet())
+ addToGraph(ForceOSRExit);
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ if (putByIdStatus.numVariants() > 1) {
+ if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
+ || !Options::enablePolymorphicAccessInlining()) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+
+ if (!isDirect) {
+ for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
+ if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
+ continue;
+ if (!putByIdStatus[variantIndex].structureChain())
+ continue;
+ emitPrototypeChecks(
+ putByIdStatus[variantIndex].oldStructure(),
+ putByIdStatus[variantIndex].structureChain());
+ }
+ }
+
+ MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
+ data->variants = putByIdStatus.variants();
+ data->identifierNumber = identifierNumber;
+ addToGraph(MultiPutByOffset, OpInfo(data), base, value);
+ return;
+ }
+
+ ASSERT(putByIdStatus.numVariants() == 1);
+ const PutByIdVariant& variant = putByIdStatus[0];
+
+ if (variant.kind() == PutByIdVariant::Replace) {
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
+ handlePutByOffset(base, identifierNumber, variant.offset(), value);
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+ return;
+ }
+
+ if (variant.kind() != PutByIdVariant::Transition) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ if (variant.structureChain() && !variant.structureChain()->isStillValid()) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ m_graph.chains().addLazily(variant.structureChain());
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
+ if (!isDirect)
+ emitPrototypeChecks(variant.oldStructure(), variant.structureChain());
+
+ ASSERT(variant.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
+
+ Node* propertyStorage;
+ StructureTransitionData* transitionData = m_graph.addStructureTransitionData(
+ StructureTransitionData(variant.oldStructure(), variant.newStructure()));
+
+ if (variant.oldStructure()->outOfLineCapacity()
+ != variant.newStructure()->outOfLineCapacity()) {
+
+ // If we're growing the property storage then it must be because we're
+ // storing into the out-of-line storage.
+ ASSERT(!isInlineOffset(variant.offset()));
+
+ if (!variant.oldStructure()->outOfLineCapacity()) {
+ propertyStorage = addToGraph(
+ AllocatePropertyStorage, OpInfo(transitionData), base);
+ } else {
+ propertyStorage = addToGraph(
+ ReallocatePropertyStorage, OpInfo(transitionData),
+ base, addToGraph(GetButterfly, base));
+ }
+ } else {
+ if (isInlineOffset(variant.offset()))
+ propertyStorage = base;
+ else
+ propertyStorage = addToGraph(GetButterfly, base);
+ }
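// A condensed sketch of the storage decision above (hypothetical helper): a transition
// only needs AllocatePropertyStorage/ReallocatePropertyStorage when the out-of-line
// capacity grows, and the very first out-of-line slot is a fresh allocation rather
// than a reallocation.
//
//   enum class StorageOp { ReuseExisting, AllocateFresh, Reallocate };
//
//   StorageOp storageOpForTransition(size_t oldOutOfLineCapacity, size_t newOutOfLineCapacity)
//   {
//       if (oldOutOfLineCapacity == newOutOfLineCapacity)
//           return StorageOp::ReuseExisting;
//       return oldOutOfLineCapacity ? StorageOp::Reallocate : StorageOp::AllocateFresh;
//   }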
+
+ addToGraph(PutStructure, OpInfo(transitionData), base);
+
+ addToGraph(
+ PutByOffset,
+ OpInfo(m_graph.m_storageAccessData.size()),
+ propertyStorage,
+ base,
+ value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = variant.offset();
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+}
+
void ByteCodeParser::prepareToParseBlock()
{
for (unsigned i = 0; i < m_constants.size(); ++i)
m_constants[i] = ConstantRecord();
m_cellConstantNodes.clear();
}
+Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
+{
+ Node* localBase = get(VirtualRegister(JSStack::ScopeChain));
+ if (skipTop) {
+ ASSERT(!inlineCallFrame());
+ localBase = addToGraph(SkipTopScope, localBase);
+ }
+ for (unsigned n = skipCount; n--;)
+ localBase = addToGraph(SkipScope, localBase);
+ return localBase;
+}
+
bool ByteCodeParser::parseBlock(unsigned limit)
{
bool shouldContinueParsing = true;
-
- Interpreter* interpreter = m_globalData->interpreter;
+
+ Interpreter* interpreter = m_vm->interpreter;
Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
unsigned blockBegin = m_currentIndex;
// If we are the first basic block, introduce markers for arguments. This allows
// us to track if a use of an argument may use the actual argument passed, as
// opposed to using a value we set explicitly.
- if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
+ if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
m_graph.m_arguments.resize(m_numArguments);
for (unsigned argument = 0; argument < m_numArguments; ++argument) {
- NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
+ VariableAccessData* variable = newVariableAccessData(
+ virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
+ variable->mergeStructureCheckHoistingFailed(
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
+ variable->mergeCheckArrayHoistingFailed(
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
+
+ Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
m_graph.m_arguments[argument] = setArgument;
- m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
}
}
while (true) {
- m_currentProfilingIndex = m_currentIndex;
-
+ for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
+ m_setLocalQueue[i].execute(this);
+ m_setLocalQueue.resize(0);
+
// Don't extend over jump destinations.
if (m_currentIndex == limit) {
// Ordinarily we want to plant a jump. But refuse to do this if the block is
// to be true.
if (!m_currentBlock->isEmpty())
addToGraph(Jump, OpInfo(m_currentIndex));
- else {
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
-#endif
- }
return shouldContinueParsing;
}
// Switch on the current bytecode opcode.
Instruction* currentInstruction = instructionsBegin + m_currentIndex;
+ m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
+
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog(" parsing ", currentCodeOrigin(), "\n");
+
+ if (m_graph.compilation()) {
+ addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
+ Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
+ }
+
switch (opcodeID) {
// === Function entry opcodes ===
case op_enter:
// Initialize all locals to undefined.
for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
- set(i, constantUndefined());
+ set(virtualRegisterForLocal(i), constantUndefined(), ImmediateNakedSet);
+ if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct)
+ set(virtualRegisterForArgument(0), constantUndefined(), ImmediateNakedSet);
NEXT_OPCODE(op_enter);
-
- case op_convert_this: {
- NodeIndex op1 = getThis();
- if (m_graph[op1].op() == ConvertThis)
- setThis(op1);
- else
- setThis(addToGraph(ConvertThis, op1));
- NEXT_OPCODE(op_convert_this);
+
+ case op_touch_entry:
+ if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
+ addToGraph(ForceOSRExit);
+ NEXT_OPCODE(op_touch_entry);
+
+ case op_to_this: {
+ Node* op1 = getThis();
+ if (op1->op() != ToThis) {
+ Structure* cachedStructure = currentInstruction[2].u.structure.get();
+ if (!cachedStructure
+ || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
+ || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)
+ || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
+ setThis(addToGraph(ToThis, op1));
+ } else {
+ addToGraph(
+ CheckStructure,
+ OpInfo(m_graph.addStructureSet(cachedStructure)),
+ op1);
+ }
+ }
+ NEXT_OPCODE(op_to_this);
}
case op_create_this: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CreateThis, op1));
+ int calleeOperand = currentInstruction[2].u.operand;
+ Node* callee = get(VirtualRegister(calleeOperand));
+ bool alreadyEmitted = false;
+ if (callee->op() == WeakJSConstant) {
+ JSCell* cell = callee->weakConstant();
+ ASSERT(cell->inherits(JSFunction::info()));
+
+ JSFunction* function = jsCast<JSFunction*>(cell);
+ if (Structure* structure = function->allocationStructure()) {
+ addToGraph(AllocationProfileWatchpoint, OpInfo(function));
+ // The callee is still live up to this point.
+ addToGraph(Phantom, callee);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
+ alreadyEmitted = true;
+ }
+ }
+ if (!alreadyEmitted) {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
+ }
NEXT_OPCODE(op_create_this);
}
-
+
case op_new_object: {
- set(currentInstruction[1].u.operand, addToGraph(NewObject));
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(NewObject,
+ OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
NEXT_OPCODE(op_new_object);
}
case op_new_array: {
int startOperand = currentInstruction[2].u.operand;
int numOperands = currentInstruction[3].u.operand;
- for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
- addVarArgChild(get(operandIdx));
- set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
+ ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
+ for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
+ addVarArgChild(get(VirtualRegister(operandIdx)));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
NEXT_OPCODE(op_new_array);
}
+ case op_new_array_with_size: {
+ int lengthOperand = currentInstruction[2].u.operand;
+ ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
+ NEXT_OPCODE(op_new_array_with_size);
+ }
+
case op_new_array_buffer: {
int startConstant = currentInstruction[2].u.operand;
int numConstants = currentInstruction[3].u.operand;
- set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
+ ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
+ NewArrayBufferData data;
+ data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
+ data.numConstants = numConstants;
+ data.indexingType = profile->selectIndexingType();
+
+ // If this statement has never executed, we'll have the wrong indexing type in the profile.
+ for (int i = 0; i < numConstants; ++i) {
+ data.indexingType =
+ leastUpperBoundOfIndexingTypeAndValue(
+ data.indexingType,
+ m_codeBlock->constantBuffer(data.startConstant)[i]);
+ }
+
+ m_graph.m_newArrayBufferData.append(data);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
NEXT_OPCODE(op_new_array_buffer);
}
case op_new_regexp: {
- set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
NEXT_OPCODE(op_new_regexp);
}
case op_get_callee: {
- if (m_inlineStackTop->m_inlineCallFrame)
- set(currentInstruction[1].u.operand, getDirect(m_inlineStackTop->m_calleeVR));
- else
- set(currentInstruction[1].u.operand, addToGraph(GetCallee));
+ JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
+ if (!cachedFunction
+ || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) {
+ set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
+ } else {
+ ASSERT(cachedFunction->inherits(JSFunction::info()));
+ Node* actualCallee = get(VirtualRegister(JSStack::Callee));
+ addToGraph(CheckFunction, OpInfo(cachedFunction), actualCallee);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(WeakJSConstant, OpInfo(cachedFunction)));
+ }
NEXT_OPCODE(op_get_callee);
}
// === Bitwise operations ===
case op_bitand: {
- NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
- NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
NEXT_OPCODE(op_bitand);
}
case op_bitor: {
- NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
- NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
NEXT_OPCODE(op_bitor);
}
case op_bitxor: {
- NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
- NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
NEXT_OPCODE(op_bitxor);
}
case op_rshift: {
- NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
- NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
- NodeIndex result;
- // Optimize out shifts by zero.
- if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
- result = op1;
- else
- result = addToGraph(BitRShift, op1, op2);
- set(currentInstruction[1].u.operand, result);
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(BitRShift, op1, op2));
NEXT_OPCODE(op_rshift);
}
case op_lshift: {
- NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
- NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
- NodeIndex result;
- // Optimize out shifts by zero.
- if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
- result = op1;
- else
- result = addToGraph(BitLShift, op1, op2);
- set(currentInstruction[1].u.operand, result);
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(BitLShift, op1, op2));
NEXT_OPCODE(op_lshift);
}
case op_urshift: {
- NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
- NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
- NodeIndex result;
- // The result of a zero-extending right shift is treated as an unsigned value.
- // This means that if the top bit is set, the result is not in the int32 range,
- // and as such must be stored as a double. If the shift amount is a constant,
- // we may be able to optimize.
- if (isInt32Constant(op2)) {
- // If we know we are shifting by a non-zero amount, then since the operation
- // zero fills we know the top bit of the result must be zero, and as such the
- // result must be within the int32 range. Conversely, if this is a shift by
- // zero, then the result may be changed by the conversion to unsigned, but it
- // is not necessary to perform the shift!
- if (valueOfInt32Constant(op2) & 0x1f)
- result = addToGraph(BitURShift, op1, op2);
- else
- result = makeSafe(addToGraph(UInt32ToNumber, op1));
- } else {
- // Cannot optimize at this stage; shift & potentially rebox as a double.
- result = addToGraph(BitURShift, op1, op2);
- result = makeSafe(addToGraph(UInt32ToNumber, result));
- }
- set(currentInstruction[1].u.operand, result);
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(BitURShift, op1, op2));
NEXT_OPCODE(op_urshift);
}
-
- // === Increment/Decrement opcodes ===
-
- case op_pre_inc: {
- unsigned srcDst = currentInstruction[1].u.operand;
- NodeIndex op = get(srcDst);
- set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
- NEXT_OPCODE(op_pre_inc);
+
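+ // op_unsigned reinterprets an int32 as an unsigned value; UInt32ToNumber may have to
+ // produce a double when the top bit is set, so the node is wrapped in makeSafe().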
+ case op_unsigned: {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
+ NEXT_OPCODE(op_unsigned);
}
- case op_post_inc: {
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
- ASSERT(result != srcDst); // Required for assumptions we make during OSR.
- NodeIndex op = get(srcDst);
- set(result, op);
- set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
- NEXT_OPCODE(op_post_inc);
- }
+ // === Increment/Decrement opcodes ===
- case op_pre_dec: {
- unsigned srcDst = currentInstruction[1].u.operand;
- NodeIndex op = get(srcDst);
- set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
- NEXT_OPCODE(op_pre_dec);
+ case op_inc: {
+ int srcDst = currentInstruction[1].u.operand;
+ VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
+ Node* op = get(srcDstVirtualRegister);
+ set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, one())));
+ NEXT_OPCODE(op_inc);
}
- case op_post_dec: {
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
- NodeIndex op = get(srcDst);
- set(result, op);
- set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
- NEXT_OPCODE(op_post_dec);
+ case op_dec: {
+ int srcDst = currentInstruction[1].u.operand;
+ VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
+ Node* op = get(srcDstVirtualRegister);
+ set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, one())));
+ NEXT_OPCODE(op_dec);
}
// === Arithmetic operations ===
case op_add: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ if (op1->hasNumberResult() && op2->hasNumberResult())
+ set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
else
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
+ set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
NEXT_OPCODE(op_add);
}
case op_sub: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
NEXT_OPCODE(op_sub);
}
case op_negate: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
NEXT_OPCODE(op_negate);
}
case op_mul: {
// Multiply requires that the inputs are not truncated, unfortunately.
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
NEXT_OPCODE(op_mul);
}
case op_mod: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
NEXT_OPCODE(op_mod);
}
case op_div: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
NEXT_OPCODE(op_div);
}
// === Misc operations ===
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
case op_debug:
addToGraph(Breakpoint);
NEXT_OPCODE(op_debug);
-#endif
+
+ case op_profile_will_call: {
+ addToGraph(ProfileWillCall);
+ NEXT_OPCODE(op_profile_will_call);
+ }
+
+ case op_profile_did_call: {
+ addToGraph(ProfileDidCall);
+ NEXT_OPCODE(op_profile_did_call);
+ }
+
case op_mov: {
- NodeIndex op = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, op);
+ Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), op);
NEXT_OPCODE(op_mov);
}
+
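+ // Same as op_mov, but for captured variables: writes must notify the variable's
+ // watchpoint set unless that set has already been invalidated.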
+ case op_captured_mov: {
+ Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
+ if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) {
+ if (set->state() != IsInvalidated)
+ addToGraph(NotifyWrite, OpInfo(set), op);
+ }
+ set(VirtualRegister(currentInstruction[1].u.operand), op);
+ NEXT_OPCODE(op_captured_mov);
+ }
case op_check_has_instance:
- addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
+ addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
NEXT_OPCODE(op_check_has_instance);
case op_instanceof: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- NodeIndex baseValue = get(currentInstruction[3].u.operand);
- NodeIndex prototype = get(currentInstruction[4].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
NEXT_OPCODE(op_instanceof);
}
case op_is_undefined: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
NEXT_OPCODE(op_is_undefined);
}
case op_is_boolean: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
NEXT_OPCODE(op_is_boolean);
}
case op_is_number: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
NEXT_OPCODE(op_is_number);
}
case op_is_string: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsString, value));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
NEXT_OPCODE(op_is_string);
}
case op_is_object: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
NEXT_OPCODE(op_is_object);
}
case op_is_function: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
NEXT_OPCODE(op_is_function);
}
case op_not: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
NEXT_OPCODE(op_not);
}
case op_to_primitive: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
NEXT_OPCODE(op_to_primitive);
}
case op_strcat: {
int startOperand = currentInstruction[2].u.operand;
int numOperands = currentInstruction[3].u.operand;
- for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
- addVarArgChild(get(operandIdx));
- set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
+#if CPU(X86)
+ // X86 doesn't have enough registers to compile MakeRope with three arguments.
+ // Rather than try to be clever, we just make MakeRope dumber on this processor.
+ const unsigned maxRopeArguments = 2;
+#else
+ const unsigned maxRopeArguments = 3;
+#endif
+ auto toStringNodes = std::make_unique<Node*[]>(numOperands);
+ for (int i = 0; i < numOperands; i++)
+ toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
+
+ for (int i = 0; i < numOperands; i++)
+ addToGraph(Phantom, toStringNodes[i]);
+
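+ // Chain the ToString'd operands into MakeRope nodes, consuming at most maxRopeArguments
+ // children per node; each completed rope becomes the leading child of the next MakeRope.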
+ Node* operands[AdjacencyList::Size];
+ unsigned indexInOperands = 0;
+ for (unsigned i = 0; i < AdjacencyList::Size; ++i)
+ operands[i] = 0;
+ for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
+ if (indexInOperands == maxRopeArguments) {
+ operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
+ for (unsigned i = 1; i < AdjacencyList::Size; ++i)
+ operands[i] = 0;
+ indexInOperands = 1;
+ }
+
+ ASSERT(indexInOperands < AdjacencyList::Size);
+ ASSERT(indexInOperands < maxRopeArguments);
+ operands[indexInOperands++] = toStringNodes[operandIdx];
+ }
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(MakeRope, operands[0], operands[1], operands[2]));
NEXT_OPCODE(op_strcat);
}
case op_less: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
NEXT_OPCODE(op_less);
}
case op_lesseq: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
NEXT_OPCODE(op_lesseq);
}
case op_greater: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
NEXT_OPCODE(op_greater);
}
case op_greatereq: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
NEXT_OPCODE(op_greatereq);
}
case op_eq: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
NEXT_OPCODE(op_eq);
}
case op_eq_null: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, constantNull()));
NEXT_OPCODE(op_eq_null);
}
case op_stricteq: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
NEXT_OPCODE(op_stricteq);
}
case op_neq: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
NEXT_OPCODE(op_neq);
}
case op_neq_null: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull())));
NEXT_OPCODE(op_neq_null);
}
case op_nstricteq: {
- NodeIndex op1 = get(currentInstruction[2].u.operand);
- NodeIndex op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* invertedResult = addToGraph(CompareStrictEq, op1, op2);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
NEXT_OPCODE(op_nstricteq);
}
// === Property access operations ===
case op_get_by_val: {
- PredictedType prediction = getPrediction();
+ SpeculatedType prediction = getPrediction();
- NodeIndex base = get(currentInstruction[2].u.operand);
- NodeIndex property = get(currentInstruction[3].u.operand);
- NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
- NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
- set(currentInstruction[1].u.operand, getByVal);
+ Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+ ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
+ Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
+ set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
NEXT_OPCODE(op_get_by_val);
}
+ case op_put_by_val_direct:
case op_put_by_val: {
- NodeIndex base = get(currentInstruction[1].u.operand);
- NodeIndex property = get(currentInstruction[2].u.operand);
- NodeIndex value = get(currentInstruction[3].u.operand);
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
- addToGraph(PutByVal, base, property, value);
+ ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
+
+ Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
+
+ addVarArgChild(base);
+ addVarArgChild(property);
+ addVarArgChild(value);
+ addVarArgChild(0); // Leave room for property storage.
+ addVarArgChild(0); // Leave room for length.
+ addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
NEXT_OPCODE(op_put_by_val);
}
- case op_method_check: {
- m_currentProfilingIndex += OPCODE_LENGTH(op_method_check);
- Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
-
- PredictedType prediction = getPrediction();
-
- ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
-
- NodeIndex base = get(getInstruction[2].u.operand);
- unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
-
- // Check if the method_check was monomorphic. If so, emit a CheckXYZMethod
- // node, which is a lot more efficient.
- GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock,
- m_currentIndex,
- m_codeBlock->identifier(identifier));
- MethodCallLinkStatus methodCallStatus = MethodCallLinkStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_currentIndex);
-
- if (methodCallStatus.isSet()
- && !getByIdStatus.wasSeenInJIT()
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
- // It's monomorphic as far as we can tell, since the method_check was linked
- // but the slow path (i.e. the normal get_by_id) never fired.
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base);
- if (methodCallStatus.needsPrototypeCheck())
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.prototypeStructure())), cellConstant(methodCallStatus.prototype()));
-
- // Keep the base of the access alive past the speculations.
- addToGraph(Phantom, base);
-
- set(getInstruction[1].u.operand, cellConstant(methodCallStatus.function()));
- } else
- set(getInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifier), OpInfo(prediction), base));
-
- m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
- continue;
- }
- case op_get_scoped_var: {
- PredictedType prediction = getPrediction();
- int dst = currentInstruction[1].u.operand;
- int slot = currentInstruction[2].u.operand;
- int depth = currentInstruction[3].u.operand;
- NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
- NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
- set(dst, getScopedVar);
- NEXT_OPCODE(op_get_scoped_var);
- }
- case op_put_scoped_var: {
- int slot = currentInstruction[1].u.operand;
- int depth = currentInstruction[2].u.operand;
- int source = currentInstruction[3].u.operand;
- NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
- addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
- NEXT_OPCODE(op_put_scoped_var);
- }
- case op_get_by_id: {
- PredictedType prediction = getPredictionWithoutOSRExit();
+ case op_get_by_id:
+ case op_get_by_id_out_of_line:
+ case op_get_array_length: {
+ SpeculatedType prediction = getPrediction();
- NodeIndex base = get(currentInstruction[2].u.operand);
+ Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- Identifier identifier = m_codeBlock->identifier(identifierNumber);
+ StringImpl* uid = m_graph.identifiers()[identifierNumber];
GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
+ m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
+ m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
+ currentCodeOrigin(), uid);
- if (getByIdStatus.isSimpleDirect()
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
- ASSERT(getByIdStatus.structureSet().size());
-
- // The implementation of GetByOffset does not know to terminate speculative
- // execution if it doesn't have a prediction, so we do it manually.
- if (prediction == PredictNone)
- addToGraph(ForceOSRExit);
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
- NodeIndex propertyStorage;
- size_t offsetOffset;
- if (getByIdStatus.structureSet().allAreUsingInlinePropertyStorage()) {
- propertyStorage = base;
- ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
- offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
- } else {
- propertyStorage = addToGraph(GetPropertyStorage, base);
- offsetOffset = 0;
- }
- set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage));
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = getByIdStatus.offset() + offsetOffset;
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
- } else
- set(currentInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ handleGetById(
+ currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
NEXT_OPCODE(op_get_by_id);
}
case op_put_by_id:
+ case op_put_by_id_out_of_line:
case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal: {
- NodeIndex value = get(currentInstruction[3].u.operand);
- NodeIndex base = get(currentInstruction[1].u.operand);
+ case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line: {
+ Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
bool direct = currentInstruction[8].u.operand;
PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock,
- m_currentIndex,
- m_codeBlock->identifier(identifierNumber));
- if (!putByIdStatus.isSet())
- addToGraph(ForceOSRExit);
+ m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
+ m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
+ currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
- bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
-
- if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
- addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = putByIdStatus.offset();
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
- } else if (!hasExitSite
- && putByIdStatus.isSimpleTransition()
- && putByIdStatus.oldStructure()->propertyStorageCapacity() == putByIdStatus.newStructure()->propertyStorageCapacity()
- && structureChainIsStillValid(
- direct,
- putByIdStatus.oldStructure(),
- putByIdStatus.structureChain())) {
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
- if (!direct) {
- if (!putByIdStatus.oldStructure()->storedPrototype().isNull())
- addToGraph(
- CheckStructure,
- OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure()->storedPrototype().asCell()->structure())),
- cellConstant(putByIdStatus.oldStructure()->storedPrototype().asCell()));
-
- for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
- JSValue prototype = (*it)->storedPrototype();
- if (prototype.isNull())
- continue;
- ASSERT(prototype.isCell());
- addToGraph(
- CheckStructure,
- OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())),
- cellConstant(prototype.asCell()));
- }
- }
- addToGraph(
- PutStructure,
- OpInfo(
- m_graph.addStructureTransitionData(
- StructureTransitionData(
- putByIdStatus.oldStructure(),
- putByIdStatus.newStructure()))),
- base);
-
- addToGraph(
- PutByOffset,
- OpInfo(m_graph.m_storageAccessData.size()),
- base,
- addToGraph(GetPropertyStorage, base),
- value);
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = putByIdStatus.offset();
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
- } else {
- if (direct)
- addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
- else
- addToGraph(PutById, OpInfo(identifierNumber), base, value);
- }
-
+ handlePutById(base, identifierNumber, value, putByIdStatus, direct);
NEXT_OPCODE(op_put_by_id);
}
- case op_get_global_var: {
- PredictedType prediction = getPrediction();
-
- NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand), OpInfo(prediction));
- set(currentInstruction[1].u.operand, getGlobalVar);
- NEXT_OPCODE(op_get_global_var);
+ case op_init_global_const_nop: {
+ NEXT_OPCODE(op_init_global_const_nop);
}
- case op_put_global_var: {
- NodeIndex value = get(currentInstruction[2].u.operand);
- addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value);
- NEXT_OPCODE(op_put_global_var);
+ case op_init_global_const: {
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ addToGraph(
+ PutGlobalVar,
+ OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
+ value);
+ NEXT_OPCODE(op_init_global_const);
}
// === Block terminators. ===
case op_jmp: {
- unsigned relativeOffset = currentInstruction[1].u.operand;
+ int relativeOffset = currentInstruction[1].u.operand;
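+ // Backward jumps (offset <= 0) flush the live locals before terminating the block,
+ // mirroring flushIfTerminal() in the switch cases below.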
+ if (relativeOffset <= 0)
+ flushForTerminal();
addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
LAST_OPCODE(op_jmp);
}
- case op_loop: {
- unsigned relativeOffset = currentInstruction[1].u.operand;
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_loop);
- }
-
case op_jtrue: {
unsigned relativeOffset = currentInstruction[2].u.operand;
- NodeIndex condition = get(currentInstruction[1].u.operand);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
+ Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
LAST_OPCODE(op_jtrue);
}
case op_jfalse: {
unsigned relativeOffset = currentInstruction[2].u.operand;
- NodeIndex condition = get(currentInstruction[1].u.operand);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
+ Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jfalse);
}
- case op_loop_if_true: {
- unsigned relativeOffset = currentInstruction[2].u.operand;
- NodeIndex condition = get(currentInstruction[1].u.operand);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
- LAST_OPCODE(op_loop_if_true);
- }
-
- case op_loop_if_false: {
- unsigned relativeOffset = currentInstruction[2].u.operand;
- NodeIndex condition = get(currentInstruction[1].u.operand);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
- LAST_OPCODE(op_loop_if_false);
- }
-
case op_jeq_null: {
unsigned relativeOffset = currentInstruction[2].u.operand;
- NodeIndex value = get(currentInstruction[1].u.operand);
- NodeIndex condition = addToGraph(CompareEq, value, constantNull());
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
+ Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* condition = addToGraph(CompareEqConstant, value, constantNull());
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
LAST_OPCODE(op_jeq_null);
}
case op_jneq_null: {
unsigned relativeOffset = currentInstruction[2].u.operand;
- NodeIndex value = get(currentInstruction[1].u.operand);
- NodeIndex condition = addToGraph(CompareEq, value, constantNull());
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
+ Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* condition = addToGraph(CompareEqConstant, value, constantNull());
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jneq_null);
}
case op_jless: {
unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
+ Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* condition = addToGraph(CompareLess, op1, op2);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
LAST_OPCODE(op_jless);
}
case op_jlesseq: {
unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
+ Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* condition = addToGraph(CompareLessEq, op1, op2);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
LAST_OPCODE(op_jlesseq);
}
case op_jgreater: {
unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
+ Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* condition = addToGraph(CompareGreater, op1, op2);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
LAST_OPCODE(op_jgreater);
}
case op_jgreatereq: {
unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
+ Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* condition = addToGraph(CompareGreaterEq, op1, op2);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
LAST_OPCODE(op_jgreatereq);
}
case op_jnless: {
unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
+ Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* condition = addToGraph(CompareLess, op1, op2);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jnless);
}
case op_jnlesseq: {
unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
+ Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* condition = addToGraph(CompareLessEq, op1, op2);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jnlesseq);
}
case op_jngreater: {
unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
+ Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* condition = addToGraph(CompareGreater, op1, op2);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jngreater);
}
case op_jngreatereq: {
unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
+ Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* condition = addToGraph(CompareGreaterEq, op1, op2);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jngreatereq);
}
-
- case op_loop_if_less: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
- LAST_OPCODE(op_loop_if_less);
- }
-
- case op_loop_if_lesseq: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
- LAST_OPCODE(op_loop_if_lesseq);
- }
-
- case op_loop_if_greater: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
- LAST_OPCODE(op_loop_if_greater);
+
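+ // Lower the baseline jump table into SwitchData: every non-empty branch offset that is
+ // not just the fall-through target becomes a SwitchCase keyed by its immediate value.
+ // op_switch_char and op_switch_string below follow the same pattern.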
+ case op_switch_imm: {
+ SwitchData& data = *m_graph.m_switchData.add();
+ data.kind = SwitchImm;
+ data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
+ data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
+ for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
+ if (!table.branchOffsets[i])
+ continue;
+ unsigned target = m_currentIndex + table.branchOffsets[i];
+ if (target == data.fallThrough.bytecodeIndex())
+ continue;
+ data.cases.append(SwitchCase::withBytecodeIndex(jsNumber(static_cast<int32_t>(table.min + i)), target));
+ }
+ flushIfTerminal(data);
+ addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+ LAST_OPCODE(op_switch_imm);
}
-
- case op_loop_if_greatereq: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- NodeIndex op1 = get(currentInstruction[1].u.operand);
- NodeIndex op2 = get(currentInstruction[2].u.operand);
- NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
- LAST_OPCODE(op_loop_if_greatereq);
+
+ case op_switch_char: {
+ SwitchData& data = *m_graph.m_switchData.add();
+ data.kind = SwitchChar;
+ data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
+ data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
+ for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
+ if (!table.branchOffsets[i])
+ continue;
+ unsigned target = m_currentIndex + table.branchOffsets[i];
+ if (target == data.fallThrough.bytecodeIndex())
+ continue;
+ data.cases.append(
+ SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
+ }
+ flushIfTerminal(data);
+ addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+ LAST_OPCODE(op_switch_char);
+ }
+
+ case op_switch_string: {
+ SwitchData& data = *m_graph.m_switchData.add();
+ data.kind = SwitchString;
+ data.switchTableIndex = currentInstruction[1].u.operand;
+ data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
+ StringJumpTable::StringOffsetTable::iterator iter;
+ StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
+ for (iter = table.offsetTable.begin(); iter != end; ++iter) {
+ unsigned target = m_currentIndex + iter->value.branchOffset;
+ if (target == data.fallThrough.bytecodeIndex())
+ continue;
+ data.cases.append(
+ SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
+ }
+ flushIfTerminal(data);
+ addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+ LAST_OPCODE(op_switch_string);
}
case op_ret:
- if (m_inlineStackTop->m_inlineCallFrame) {
- if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
- setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
+ flushForReturn();
+ if (inlineCallFrame()) {
+ ASSERT(m_inlineStackTop->m_returnValue.isValid());
+ setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
m_inlineStackTop->m_didReturn = true;
if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
// If we're returning from the first block, then we're done parsing.
- ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
+ ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
shouldContinueParsing = false;
LAST_OPCODE(op_ret);
} else {
// If inlining created blocks, and we're doing a return, then we need some
// special linking.
- ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
+ ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
}
if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
- addToGraph(Jump, OpInfo(NoBlock));
+ addToGraph(Jump, OpInfo(0));
m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
m_inlineStackTop->m_didEarlyReturn = true;
}
LAST_OPCODE(op_ret);
}
- addToGraph(Return, get(currentInstruction[1].u.operand));
+ addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
LAST_OPCODE(op_ret);
case op_end:
- ASSERT(!m_inlineStackTop->m_inlineCallFrame);
- addToGraph(Return, get(currentInstruction[1].u.operand));
+ flushForReturn();
+ ASSERT(!inlineCallFrame());
+ addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
LAST_OPCODE(op_end);
case op_throw:
- addToGraph(Throw, get(currentInstruction[1].u.operand));
+ addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
+ flushForTerminal();
+ addToGraph(Unreachable);
LAST_OPCODE(op_throw);
- case op_throw_reference_error:
+ case op_throw_static_error:
addToGraph(ThrowReferenceError);
- LAST_OPCODE(op_throw_reference_error);
+ flushForTerminal();
+ addToGraph(Unreachable);
+ LAST_OPCODE(op_throw_static_error);
case op_call:
- handleCall(interpreter, currentInstruction, Call, CodeForCall);
+ handleCall(currentInstruction, Call, CodeForCall);
NEXT_OPCODE(op_call);
case op_construct:
- handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
+ handleCall(currentInstruction, Construct, CodeForConstruct);
NEXT_OPCODE(op_construct);
- case op_call_put_result:
- NEXT_OPCODE(op_call_put_result);
-
- case op_resolve: {
- PredictedType prediction = getPrediction();
+ case op_call_varargs: {
+ int result = currentInstruction[1].u.operand;
+ int callee = currentInstruction[2].u.operand;
+ int thisReg = currentInstruction[3].u.operand;
+ int arguments = currentInstruction[4].u.operand;
+ int firstFreeReg = currentInstruction[5].u.operand;
- unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ ASSERT(inlineCallFrame());
+ ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
+ ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
- NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
- set(currentInstruction[1].u.operand, resolve);
+ addToGraph(CheckArgumentsNotCreated);
- NEXT_OPCODE(op_resolve);
+ unsigned argCount = inlineCallFrame()->arguments.size();
+
+ // Let's compute the register offset. We start with the last used register, and
+ // then adjust for the things we want in the call frame.
+ int registerOffset = firstFreeReg + 1;
+ registerOffset -= argCount; // We will be passing some arguments.
+ registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header.
+
+ // Get the alignment right.
+ registerOffset = -WTF::roundUpToMultipleOf(
+ stackAlignmentRegisters(),
+ -registerOffset);
+
+ ensureLocals(
+ m_inlineStackTop->remapOperand(
+ VirtualRegister(registerOffset)).toLocal());
+
+ // The bytecode wouldn't have set up the arguments. But we'll do it and make it
+ // look like the bytecode had done it.
+ int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
+ set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet);
+ for (unsigned argument = 1; argument < argCount; ++argument)
+ set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet);
+
+ handleCall(
+ result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs),
+ callee, argCount, registerOffset);
+ NEXT_OPCODE(op_call_varargs);
}
-
- case op_resolve_base: {
- PredictedType prediction = getPrediction();
- unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ case op_jneq_ptr:
+ // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
+ // support simmer for a while before making it more general, since it's
+ // already gnarly enough as it is.
+ ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
+ addToGraph(
+ CheckFunction,
+ OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
+ get(VirtualRegister(currentInstruction[1].u.operand)));
+ addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
+ LAST_OPCODE(op_jneq_ptr);
+
+ case op_resolve_scope: {
+ int dst = currentInstruction[1].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
+ unsigned depth = currentInstruction[4].u.operand;
+
+ // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
+ if (needsVarInjectionChecks(resolveType))
+ addToGraph(VarInjectionWatchpoint);
+
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalVar:
+ case GlobalPropertyWithVarInjectionChecks:
+ case GlobalVarWithVarInjectionChecks:
+ set(VirtualRegister(dst), cellConstant(m_inlineStackTop->m_codeBlock->globalObject()));
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks: {
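+ // If the resolved activation is known and its function has only been entered once,
+ // watch for reentry and fold the scope to a constant; otherwise walk the scope chain.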
+ JSActivation* activation = currentInstruction[5].u.activation.get();
+ if (activation
+ && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) {
+ addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable()));
+ set(VirtualRegister(dst), cellConstant(activation));
+ break;
+ }
+ set(VirtualRegister(dst),
+ getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth));
+ break;
+ }
+ case Dynamic:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ NEXT_OPCODE(op_resolve_scope);
+ }
+
+ case op_get_from_scope: {
+ int dst = currentInstruction[1].u.operand;
+ int scope = currentInstruction[2].u.operand;
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+ StringImpl* uid = m_graph.identifiers()[identifierNumber];
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+
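+ // Snapshot the resolve metadata under the profiled block's lock: GlobalVar modes stash a
+ // watchpoint set in operand 5, the other modes a cached Structure; operand 6 holds the
+ // offset or register pointer.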
+ Structure* structure = 0;
+ WatchpointSet* watchpoints = 0;
+ uintptr_t operand;
+ {
+ ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+ watchpoints = currentInstruction[5].u.watchpointSet;
+ else
+ structure = currentInstruction[5].u.structure.get();
+ operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
+ }
- NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
- set(currentInstruction[1].u.operand, resolve);
+ UNUSED_PARAM(watchpoints); // We will use this in the future. For now we read it only to document that index 5 holds the watchpoint set in GlobalVar mode.
- NEXT_OPCODE(op_resolve_base);
+ SpeculatedType prediction = getPrediction();
+ JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid);
+ if (status.state() != GetByIdStatus::Simple || status.numVariants() != 1) {
+ set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
+ break;
+ }
+ Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().singletonStructure());
+ addToGraph(Phantom, get(VirtualRegister(scope)));
+ if (JSValue specificValue = status[0].specificValue())
+ set(VirtualRegister(dst), cellConstant(specificValue.asCell()));
+ else
+ set(VirtualRegister(dst), handleGetByOffset(prediction, base, identifierNumber, operand));
+ break;
+ }
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks: {
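+ // If the global variable's watchpoint set carries an inferred value, emit a
+ // VariableWatchpoint and fold the load to that constant; otherwise load it with
+ // GetGlobalVar.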
+ addToGraph(Phantom, get(VirtualRegister(scope)));
+ SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
+ VariableWatchpointSet* watchpointSet = entry.watchpointSet();
+ JSValue specificValue =
+ watchpointSet ? watchpointSet->inferredValue() : JSValue();
+ if (!specificValue) {
+ set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
+ break;
+ }
+
+ addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
+ set(VirtualRegister(dst), inferredConstant(specificValue));
+ break;
+ }
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks: {
+ Node* scopeNode = get(VirtualRegister(scope));
+ if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) {
+ SymbolTable* symbolTable = activation->symbolTable();
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
+ ASSERT(iter != symbolTable->end(locker));
+ VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
+ if (watchpointSet) {
+ if (JSValue value = watchpointSet->inferredValue()) {
+ addToGraph(Phantom, scopeNode);
+ addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
+ set(VirtualRegister(dst), inferredConstant(value));
+ break;
+ }
+ }
+ }
+ set(VirtualRegister(dst),
+ addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction),
+ addToGraph(GetClosureRegisters, scopeNode)));
+ break;
+ }
+ case Dynamic:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ NEXT_OPCODE(op_get_from_scope);
}
-
- case op_resolve_global: {
- PredictedType prediction = getPrediction();
-
- NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
- m_graph.m_resolveGlobalData.append(ResolveGlobalData());
- ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
- data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
- data.resolveInfoIndex = m_globalResolveNumber++;
- set(currentInstruction[1].u.operand, resolve);
- NEXT_OPCODE(op_resolve_global);
+ case op_put_to_scope: {
+ unsigned scope = currentInstruction[1].u.operand;
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned value = currentInstruction[3].u.operand;
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ StringImpl* uid = m_graph.identifiers()[identifierNumber];
+
+ Structure* structure = 0;
+ VariableWatchpointSet* watchpoints = 0;
+ uintptr_t operand;
+ {
+ ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+ watchpoints = currentInstruction[5].u.watchpointSet;
+ else
+ structure = currentInstruction[5].u.structure.get();
+ operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
+ }
+
+ JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false);
+ if (status.numVariants() != 1 || status[0].kind() != PutByIdVariant::Replace) {
+ addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
+ break;
+ }
+ Node* base = cellConstantWithStructureCheck(globalObject, status[0].structure());
+ addToGraph(Phantom, get(VirtualRegister(scope)));
+ handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
+ // Keep scope alive until after put.
+ addToGraph(Phantom, get(VirtualRegister(scope)));
+ break;
+ }
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks: {
+ SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
+ ASSERT(watchpoints == entry.watchpointSet());
+ Node* valueNode = get(VirtualRegister(value));
+ addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
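+ // Writes must fire the variable's watchpoint set so code that folded the old inferred
+ // value gets invalidated; skip the notify if the set is already invalidated.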
+ if (watchpoints->state() != IsInvalidated)
+ addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
+ // Keep scope alive until after put.
+ addToGraph(Phantom, get(VirtualRegister(scope)));
+ break;
+ }
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks: {
+ Node* scopeNode = get(VirtualRegister(scope));
+ Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
+ addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value)));
+ break;
+ }
+ case Dynamic:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ NEXT_OPCODE(op_put_to_scope);
}
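+    // In rough terms, op_put_to_scope above lowers as follows: GlobalProperty becomes a
+    // PutByOffset on a structure-checked global-object constant (falling back to a generic
+    // PutById when the profiled PutByIdStatus is not a single Replace variant); GlobalVar
+    // becomes a PutGlobalVar plus a NotifyWrite if the variable's watchpoint set has not been
+    // invalidated; ClosureVar becomes a PutClosureVar against the scope's register storage.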
case op_loop_hint: {
// Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
// OSR can only happen at basic block boundaries. Assert that these two statements
// are compatible.
- ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
+ RELEASE_ASSERT(m_currentIndex == blockBegin);
// We never do OSR into an inlined code block. That could not happen, since OSR
// looks up the code block that is the replacement for the baseline JIT code
// block. Hence, machine code block = true code block = not inline code block.
if (!m_inlineStackTop->m_caller)
m_currentBlock->isOSRTarget = true;
+
+ addToGraph(LoopHint);
- // Emit a phantom node to ensure that there is a placeholder node for this bytecode
- // op.
- addToGraph(Phantom);
+ if (m_vm->watchdog && m_vm->watchdog->isEnabled())
+ addToGraph(CheckWatchdogTimer);
NEXT_OPCODE(op_loop_hint);
}
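+    // The LoopHint emitted above marks a point where Baseline->DFG OSR may enter, which is
+    // why it must sit on a basic block boundary; the optional CheckWatchdogTimer keeps
+    // long-running loops interruptible when a watchdog is installed.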
case op_init_lazy_reg: {
- set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
+ set(VirtualRegister(currentInstruction[1].u.operand), getJSConstantForValue(JSValue()));
+ ASSERT(operandIsLocal(currentInstruction[1].u.operand));
+ m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());
NEXT_OPCODE(op_init_lazy_reg);
}
case op_create_activation: {
- set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand))));
NEXT_OPCODE(op_create_activation);
}
+ case op_create_arguments: {
+ m_graph.m_hasArguments = true;
+ Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand)));
+ set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+ set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments);
+ NEXT_OPCODE(op_create_arguments);
+ }
+
case op_tear_off_activation: {
- // This currently ignores arguments because we don't support them yet.
- addToGraph(TearOffActivation, get(currentInstruction[1].u.operand));
+ addToGraph(TearOffActivation, get(VirtualRegister(currentInstruction[1].u.operand)));
NEXT_OPCODE(op_tear_off_activation);
}
+
+ case op_tear_off_arguments: {
+ m_graph.m_hasArguments = true;
+ addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand))), get(VirtualRegister(currentInstruction[2].u.operand)));
+ NEXT_OPCODE(op_tear_off_arguments);
+ }
+
+ case op_get_arguments_length: {
+ m_graph.m_hasArguments = true;
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe));
+ NEXT_OPCODE(op_get_arguments_length);
+ }
+
+ case op_get_argument_by_val: {
+ m_graph.m_hasArguments = true;
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(
+ GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
+ get(VirtualRegister(currentInstruction[3].u.operand))));
+ NEXT_OPCODE(op_get_argument_by_val);
+ }
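+    // The *Safe variants used above are the conservative forms: at parse time we cannot yet
+    // prove that the arguments object has not been created or clobbered, so later phases are
+    // left to strengthen these into unchecked accesses where that is provably safe.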
case op_new_func: {
if (!currentInstruction[3].u.operand) {
- set(currentInstruction[1].u.operand,
+ set(VirtualRegister(currentInstruction[1].u.operand),
addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
} else {
- set(currentInstruction[1].u.operand,
+ set(VirtualRegister(currentInstruction[1].u.operand),
addToGraph(
NewFunction,
OpInfo(currentInstruction[2].u.operand),
- get(currentInstruction[1].u.operand)));
+ get(VirtualRegister(currentInstruction[1].u.operand))));
}
NEXT_OPCODE(op_new_func);
}
+ case op_new_captured_func: {
+ Node* function = addToGraph(
+ NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand));
+ if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet)
+ addToGraph(NotifyWrite, OpInfo(set), function);
+ set(VirtualRegister(currentInstruction[1].u.operand), function);
+ NEXT_OPCODE(op_new_captured_func);
+ }
+
case op_new_func_exp: {
- set(currentInstruction[1].u.operand,
+ set(VirtualRegister(currentInstruction[1].u.operand),
addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
NEXT_OPCODE(op_new_func_exp);
}
- default:
- // Parse failed! This should not happen because the capabilities checker
- // should have caught it.
- ASSERT_NOT_REACHED();
- return false;
+ case op_typeof: {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
+ NEXT_OPCODE(op_typeof);
}
-
- ASSERT(canCompileOpcode(opcodeID));
- }
-}
-
-template<ByteCodeParser::PhiStackType stackType>
-void ByteCodeParser::processPhiStack()
-{
- Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;
-
- while (!phiStack.isEmpty()) {
- PhiStackEntry entry = phiStack.last();
- phiStack.removeLast();
-
- PredecessorList& predecessors = entry.m_block->m_predecessors;
- unsigned varNo = entry.m_varNo;
- VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
-#endif
-
- for (size_t i = 0; i < predecessors.size(); ++i) {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Dealing with predecessor block %u.\n", predecessors[i]);
-#endif
-
- BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
-
- NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
-
- NodeIndex valueInPredecessor = var;
- if (valueInPredecessor == NoNode) {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Did not find node, adding phi.\n");
-#endif
-
- valueInPredecessor = insertPhiNode(OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))), predecessorBlock);
- var = valueInPredecessor;
- if (stackType == ArgumentPhiStack)
- predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
- else
- predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
- phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
- } else if (m_graph[valueInPredecessor].op() == GetLocal) {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Found GetLocal @%u.\n", valueInPredecessor);
-#endif
-
- // We want to ensure that the VariableAccessDatas are identical between the
- // GetLocal and its block-local Phi. Strictly speaking we only need the two
- // to be unified. But for efficiency, we want the code that creates GetLocals
- // and Phis to try to reuse VariableAccessDatas as much as possible.
- ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1().index()].variableAccessData());
-
- valueInPredecessor = m_graph[valueInPredecessor].child1().index();
- } else {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Found @%u.\n", valueInPredecessor);
-#endif
- }
- ASSERT(m_graph[valueInPredecessor].op() == SetLocal
- || m_graph[valueInPredecessor].op() == Phi
- || m_graph[valueInPredecessor].op() == Flush
- || (m_graph[valueInPredecessor].op() == SetArgument
- && stackType == ArgumentPhiStack));
-
- VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
-
- dataForPredecessor->unify(dataForPhi);
-
- Node* phiNode = &m_graph[entry.m_phi];
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
-#endif
- if (phiNode->refCount()) {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Reffing @%u.\n", valueInPredecessor);
-#endif
- m_graph.ref(valueInPredecessor);
- }
- if (!phiNode->child1()) {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
-#endif
- phiNode->children.setChild1(Edge(valueInPredecessor));
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(WTF::dataFile());
- dataLog(".\n");
-#endif
- continue;
- }
- if (!phiNode->child2()) {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
-#endif
- phiNode->children.setChild2(Edge(valueInPredecessor));
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(WTF::dataFile());
- dataLog(".\n");
-#endif
- continue;
- }
- if (!phiNode->child3()) {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
-#endif
- phiNode->children.setChild3(Edge(valueInPredecessor));
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(WTF::dataFile());
- dataLog(".\n");
-#endif
- continue;
- }
-
- NodeIndex newPhi = insertPhiNode(OpInfo(dataForPhi), entry.m_block);
+ case op_to_number: {
+ Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
+ addToGraph(Phantom, Edge(node, NumberUse));
+ set(VirtualRegister(currentInstruction[1].u.operand), node);
+ NEXT_OPCODE(op_to_number);
+ }
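+    // op_to_number above emits no conversion at all: the Phantom's NumberUse edge supplies
+    // the speculation (and OSR exit) that the operand is already a number, and the operand
+    // itself is forwarded as the result.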
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
-#endif
-
- phiNode = &m_graph[entry.m_phi]; // reload after vector resize
- Node& newPhiNode = m_graph[newPhi];
- if (phiNode->refCount())
- m_graph.ref(newPhi);
-
- newPhiNode.children = phiNode->children;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Children of @%u: ", newPhi);
- newPhiNode.dumpChildren(WTF::dataFile());
- dataLog(".\n");
-#endif
-
- phiNode->children.initialize(newPhi, valueInPredecessor, NoNode);
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(WTF::dataFile());
- dataLog(".\n");
-#endif
+ case op_in: {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
+ NEXT_OPCODE(op_in);
}
- }
-}
-void ByteCodeParser::fixVariableAccessPredictions()
-{
- for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
- VariableAccessData* data = &m_graph.m_variableAccessData[i];
- data->find()->predict(data->nonUnifiedPrediction());
+ default:
+ // Parse failed! This should not happen because the capabilities checker
+ // should have caught it.
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
}
}
-void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
+void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
{
ASSERT(!block->isLinked);
ASSERT(!block->isEmpty());
- Node& node = m_graph[block->last()];
- ASSERT(node.isTerminal());
+ Node* node = block->last();
+ ASSERT(node->isTerminal());
- switch (node.op()) {
+ switch (node->op()) {
case Jump:
- node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
-#endif
+ node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
break;
- case Branch:
- node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
- node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
-#endif
+ case Branch: {
+ BranchData* data = node->branchData();
+ data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
+ data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
+ break;
+ }
+
+ case Switch: {
+ SwitchData* data = node->switchData();
+        for (unsigned i = data->cases.size(); i--;)
+ data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
+ data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
break;
+ }
default:
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Marking basic block %p as linked.\n", block);
-#endif
break;
}
#endif
}
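+
+// Linking is deliberately a second pass: while parsing, Jump, Branch and Switch record raw
+// bytecode offsets; only once every block of the current code block exists are those offsets
+// resolved to BasicBlock pointers via blockForBytecodeOffset().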
-void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
+void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
{
for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
if (unlinkedBlocks[i].m_needsNormalLinking) {
- linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
+ linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
unlinkedBlocks[i].m_needsNormalLinking = false;
}
}
}
-void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
-{
- BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
- if (!successor->isReachable) {
- successor->isReachable = true;
- worklist.append(successorIndex);
- }
-
- successor->m_predecessors.append(blockIndex);
-}
-
-void ByteCodeParser::determineReachability()
-{
- Vector<BlockIndex, 16> worklist;
- worklist.append(0);
- m_graph.m_blocks[0]->isReachable = true;
- while (!worklist.isEmpty()) {
- BlockIndex index = worklist.last();
- worklist.removeLast();
-
- BasicBlock* block = m_graph.m_blocks[index].get();
- ASSERT(block->isLinked);
-
- Node& node = m_graph[block->last()];
- ASSERT(node.isTerminal());
-
- if (node.isJump())
- handleSuccessor(worklist, index, node.takenBlockIndex());
- else if (node.isBranch()) {
- handleSuccessor(worklist, index, node.takenBlockIndex());
- handleSuccessor(worklist, index, node.notTakenBlockIndex());
- }
- }
-}
-
void ByteCodeParser::buildOperandMapsIfNecessary()
{
if (m_haveBuiltOperandMaps)
m_haveBuiltOperandMaps = true;
}
-ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
+ByteCodeParser::InlineStackEntry::InlineStackEntry(
+ ByteCodeParser* byteCodeParser,
+ CodeBlock* codeBlock,
+ CodeBlock* profiledBlock,
+ BasicBlock* callsiteBlockHead,
+ JSFunction* callee, // Null if this is a closure call.
+ VirtualRegister returnValueVR,
+ VirtualRegister inlineCallFrameStart,
+ int argumentCountIncludingThis,
+ CodeSpecializationKind kind)
: m_byteCodeParser(byteCodeParser)
, m_codeBlock(codeBlock)
, m_profiledBlock(profiledBlock)
- , m_calleeVR(calleeVR)
- , m_exitProfile(profiledBlock->exitProfile())
, m_callsiteBlockHead(callsiteBlockHead)
, m_returnValue(returnValueVR)
- , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
, m_didReturn(false)
, m_didEarlyReturn(false)
, m_caller(byteCodeParser->m_inlineStackTop)
{
- m_argumentPositions.resize(codeBlock->numParameters());
- for (unsigned i = codeBlock->numParameters(); i--;) {
+ {
+ ConcurrentJITLocker locker(m_profiledBlock->m_lock);
+ m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
+ m_exitProfile.initialize(locker, profiledBlock->exitProfile());
+
+        // We do this while holding the lock because we want to encourage StructureStubInfos
+        // to be potentially added to operations, and because the profiled block could be in
+        // the middle of LLInt->JIT tier-up, in which case the infos would be getting added
+        // right now.
+ if (m_profiledBlock->hasBaselineJITProfiling()) {
+ m_profiledBlock->getStubInfoMap(locker, m_stubInfos);
+ m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos);
+ }
+ }
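+    // The maps filled in above are snapshots: once the locker is released, the baseline block
+    // is free to keep mutating its profiling data, and the parser consults only these copies
+    // rather than the live structures.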
+
+ m_argumentPositions.resize(argumentCountIncludingThis);
+ for (int i = 0; i < argumentCountIncludingThis; ++i) {
byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
m_argumentPositions[i] = argumentPosition;
}
+
+ // Track the code-block-global exit sites.
+ if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
+ byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
+ codeBlock->ownerExecutable());
+ }
if (m_caller) {
// Inline case.
ASSERT(codeBlock != byteCodeParser->m_codeBlock);
- ASSERT(callee);
- ASSERT(calleeVR != InvalidVirtualRegister);
- ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
- ASSERT(callsiteBlockHead != NoBlock);
-
- InlineCallFrame inlineCallFrame;
- inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
- inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
- inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
- inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
- inlineCallFrame.arguments.resize(codeBlock->numParameters()); // Set the number of arguments including this, but don't configure the value recoveries, yet.
- inlineCallFrame.isCall = isCall(kind);
- byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
- m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
-
+ ASSERT(inlineCallFrameStart.isValid());
+ ASSERT(callsiteBlockHead);
+
+ m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
+ initializeLazyWriteBarrierForInlineCallFrameExecutable(
+ byteCodeParser->m_graph.m_plan.writeBarriers,
+ m_inlineCallFrame->executable,
+ byteCodeParser->m_codeBlock,
+ m_inlineCallFrame,
+ byteCodeParser->m_codeBlock->ownerExecutable(),
+ codeBlock->ownerExecutable());
+ m_inlineCallFrame->stackOffset = inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize;
+ if (callee) {
+ m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
+ m_inlineCallFrame->isClosureCall = false;
+ } else
+ m_inlineCallFrame->isClosureCall = true;
+ m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin();
+        m_inlineCallFrame->arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries yet.
+ m_inlineCallFrame->isCall = isCall(kind);
+
+ if (m_inlineCallFrame->caller.inlineCallFrame)
+ m_inlineCallFrame->capturedVars = m_inlineCallFrame->caller.inlineCallFrame->capturedVars;
+ else {
+ for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) {
+ if (byteCodeParser->m_codeBlock->isCaptured(virtualRegisterForLocal(i)))
+ m_inlineCallFrame->capturedVars.set(i);
+ }
+ }
+
+ for (int i = argumentCountIncludingThis; i--;) {
+ VirtualRegister argument = virtualRegisterForArgument(i);
+ if (codeBlock->isCaptured(argument))
+ m_inlineCallFrame->capturedVars.set(VirtualRegister(argument.offset() + m_inlineCallFrame->stackOffset).toLocal());
+ }
+ for (size_t i = codeBlock->m_numVars; i--;) {
+ VirtualRegister local = virtualRegisterForLocal(i);
+ if (codeBlock->isCaptured(local))
+ m_inlineCallFrame->capturedVars.set(VirtualRegister(local.offset() + m_inlineCallFrame->stackOffset).toLocal());
+ }
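+        // capturedVars is kept in the machine frame's coordinate system throughout: the
+        // enclosing frame's captured set is inherited (or computed from the machine code
+        // block at the top level), while the inlinee's own captured arguments and locals are
+        // translated by this frame's stackOffset before their bits are set.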
+
byteCodeParser->buildOperandMapsIfNecessary();
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
+ m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
+ m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
StringImpl* rep = codeBlock->identifier(i).impl();
- IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
+ BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers());
if (result.isNewEntry)
- byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
- m_identifierRemap[i] = result.iterator->second;
+ byteCodeParser->m_graph.identifiers().addLazily(rep);
+ m_identifierRemap[i] = result.iterator->value;
}
for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
if (!value) {
if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
- byteCodeParser->m_codeBlock->addConstant(JSValue());
+ byteCodeParser->addConstant(JSValue());
byteCodeParser->m_constants.append(ConstantRecord());
}
m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
}
JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
if (result.isNewEntry) {
- byteCodeParser->m_codeBlock->addConstant(value);
+ byteCodeParser->addConstant(value);
byteCodeParser->m_constants.append(ConstantRecord());
}
- m_constantRemap[i] = result.iterator->second;
+ m_constantRemap[i] = result.iterator->value;
+ }
+ for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
+ // If we inline the same code block multiple times, we don't want to needlessly
+ // duplicate its constant buffers.
+ HashMap<ConstantBufferKey, unsigned>::iterator iter =
+ byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i));
+ if (iter != byteCodeParser->m_constantBufferCache.end()) {
+ m_constantBufferRemap[i] = iter->value;
+ continue;
+ }
+ Vector<JSValue>& buffer = codeBlock->constantBufferAsVector(i);
+ unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer);
+ m_constantBufferRemap[i] = newIndex;
+ byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
+ }
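+        // The (CodeBlock*, buffer index) -> new index cache means that inlining the same
+        // callee at many call sites still contributes each of its constant buffers to the
+        // machine code block only once.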
+ for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
+ m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
+ byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i);
}
-
m_callsiteBlockHeadNeedsLinking = true;
} else {
// Machine code block case.
ASSERT(codeBlock == byteCodeParser->m_codeBlock);
ASSERT(!callee);
- ASSERT(calleeVR == InvalidVirtualRegister);
- ASSERT(returnValueVR == InvalidVirtualRegister);
- ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
- ASSERT(callsiteBlockHead == NoBlock);
+ ASSERT(!returnValueVR.isValid());
+ ASSERT(!inlineCallFrameStart.isValid());
+ ASSERT(!callsiteBlockHead);
m_inlineCallFrame = 0;
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
-
+ m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
+ m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
m_identifierRemap[i] = i;
for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
m_constantRemap[i] = i + FirstConstantRegisterIndex;
-
+ for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
+ m_constantBufferRemap[i] = i;
+ for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
+ m_switchRemap[i] = i;
m_callsiteBlockHeadNeedsLinking = false;
}
{
CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Parsing code block %p. codeType = %s, numCapturedVars = %u, needsFullScopeChain = %s, needsActivation = %s, isStrictMode = %s\n",
- codeBlock,
- codeTypeToString(codeBlock->codeType()),
- codeBlock->m_numCapturedVars,
- codeBlock->needsFullScopeChain()?"true":"false",
- codeBlock->ownerExecutable()->needsActivation()?"true":"false",
- codeBlock->ownerExecutable()->isStrictMode()?"true":"false");
-#endif
+ if (m_graph.compilation()) {
+ m_graph.compilation()->addProfiledBytecodes(
+ *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
+ }
+
+ bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
+ if (shouldDumpBytecode) {
+ dataLog("Parsing ", *codeBlock);
+ if (inlineCallFrame()) {
+ dataLog(
+ " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
+ " ", inlineCallFrame()->caller);
+ }
+ dataLog(
+ ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0,
+ ", needsActivation = ", codeBlock->needsActivation(),
+ ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
+ codeBlock->baselineVersion()->dumpBytecode();
+ }
+
+ Vector<unsigned, 32> jumpTargets;
+ computePreciseJumpTargets(codeBlock, jumpTargets);
+    if (shouldDumpBytecode) {
+ dataLog("Jump targets: ");
+ CommaPrinter comma;
+ for (unsigned i = 0; i < jumpTargets.size(); ++i)
+ dataLog(comma, jumpTargets[i]);
+ dataLog("\n");
+ }
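+
+    // The precise jump target set drives basic block formation below: every entry in
+    // jumpTargets is a bytecode offset at which a new block must begin, so each iteration of
+    // the outer loop parses only up to the next such boundary.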
- for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
+ for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
// The maximum bytecode offset to go into the current basic block is either the next jump target or the end of the instructions.
- unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
-#endif
+ unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
ASSERT(m_currentIndex < limit);
// Loop until we reach the current limit (i.e. next jump target).
do {
if (!m_currentBlock) {
// Check if we can use the last block.
- if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->isEmpty()) {
+ if (m_graph.numBlocks() && m_graph.lastBlock()->isEmpty()) {
// This must be a block belonging to us.
- ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
+ ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
// Either the block is linkable or it isn't. If it's linkable then it's the last
// block in the blockLinkingTargets list. If it's not then the last block will
// have a lower bytecode index than the one we're about to give to this block.
- if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
+ if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin != m_currentIndex) {
// Make the block linkable.
- ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
- m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
+ ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < m_currentIndex);
+ m_inlineStackTop->m_blockLinkingTargets.append(m_graph.lastBlock());
}
// Change its bytecode begin and continue.
- m_currentBlock = m_graph.m_blocks.last().get();
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
-#endif
+ m_currentBlock = m_graph.lastBlock();
m_currentBlock->bytecodeBegin = m_currentIndex;
} else {
- OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
-#endif
+ RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, PNaN));
m_currentBlock = block.get();
- ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
- m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
- m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
- m_graph.m_blocks.append(block.release());
+ // This assertion checks two things:
+ // 1) If the bytecodeBegin is greater than currentIndex, then something has gone
+ // horribly wrong. So, we're probably generating incorrect code.
+ // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
+ // a peephole coalescing of this block in the if statement above. So, we're
+ // generating suboptimal code and leaving more work for the CFG simplifier.
+ ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex);
+ m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
+ m_inlineStackTop->m_blockLinkingTargets.append(block.get());
+ // The first block is definitely an OSR target.
+ if (!m_graph.numBlocks())
+ block->isOSRTarget = true;
+ m_graph.appendBlock(block);
prepareToParseBlock();
}
}
// are at the end of an inline function, or we realized that we
// should stop parsing because there was a return in the first
// basic block.
- ASSERT(m_currentBlock->isEmpty() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);
+ ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);
if (!shouldContinueParsing)
return;
// Set during construction.
ASSERT(!m_currentIndex);
-#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
- // We should be pretending that the code has an activation.
- ASSERT(m_graph.needsActivation());
-#endif
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog("Parsing ", *m_codeBlock, "\n");
+
+ m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock.get();
+ if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock
+ && Options::enablePolyvariantDevirtualization()) {
+ if (Options::enablePolyvariantCallInlining())
+ CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap);
+ if (Options::enablePolyvariantByIdInlining())
+ m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos);
+ }
+
+ if (m_codeBlock->captureCount()) {
+ SymbolTable* symbolTable = m_codeBlock->symbolTable();
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ SymbolTable::Map::iterator iter = symbolTable->begin(locker);
+ SymbolTable::Map::iterator end = symbolTable->end(locker);
+ for (; iter != end; ++iter) {
+ VariableWatchpointSet* set = iter->value.watchpointSet();
+ if (!set)
+ continue;
+ size_t index = static_cast<size_t>(VirtualRegister(iter->value.getIndex()).toLocal());
+ while (m_localWatchpoints.size() <= index)
+ m_localWatchpoints.append(nullptr);
+ m_localWatchpoints[index] = set;
+ }
+ }
- InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);
+ InlineStackEntry inlineStackEntry(
+ this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(),
+ m_codeBlock->numParameters(), CodeForCall);
parseCodeBlock();
linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
- determineReachability();
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog("Processing local variable phis.\n");
-#endif
+ m_graph.determineReachability();
+ m_graph.killUnreachableBlocks();
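+    // Reachability and dead-block removal are now the Graph's own job: blocks that were
+    // parsed but never became reachable from the root are killed here, so later phases only
+    // ever see the reachable CFG.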
- m_currentProfilingIndex = m_currentIndex;
-
- processPhiStack<LocalPhiStack>();
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog("Processing argument phis.\n");
-#endif
- processPhiStack<ArgumentPhiStack>();
-
- fixVariableAccessPredictions();
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
+ ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
+ ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
+ ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
+ }
- m_graph.m_preservedVars = m_preservedVars;
m_graph.m_localVars = m_numLocals;
m_graph.m_parameterSlots = m_parameterSlots;
bool parse(Graph& graph)
{
-#if DFG_DEBUG_LOCAL_DISBALE
- UNUSED_PARAM(graph);
- return false;
-#else
+ SamplingRegion samplingRegion("DFG Parsing");
return ByteCodeParser(graph).parse();
-#endif
}
} } // namespace JSC::DFG