/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#if ENABLE(DFG_JIT)
+#include "AssemblyHelpers.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
-#include "DFGAssemblyHelpers.h"
#include "DFGBasicBlock.h"
+#include "DFGDominators.h"
+#include "DFGLongLivedState.h"
+#include "DFGNaturalLoops.h"
#include "DFGNode.h"
+#include "DFGNodeAllocator.h"
+#include "DFGPlan.h"
+#include "DFGScannable.h"
+#include "JSStack.h"
#include "MethodOfGettingAValueProfile.h"
-#include "RegisterFile.h"
+#include <unordered_map>
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
namespace JSC { namespace DFG {
struct StorageAccessData {
- size_t offset;
+ PropertyOffset offset;
unsigned identifierNumber;
-
- // NOTE: the offset and identifierNumber do not by themselves
- // uniquely identify a property. The identifierNumber and a
- // Structure* do. If those two match, then the offset should
- // be the same, as well. For any Node that has a StorageAccessData,
- // it is possible to retrieve the Structure* by looking at the
- // first child. It should be a CheckStructure, which has the
- // Structure*.
};
-struct ResolveGlobalData {
- unsigned identifierNumber;
- unsigned resolveInfoIndex;
+struct InlineVariableData {
+ InlineCallFrame* inlineCallFrame;
+ unsigned argumentPositionStart;
+ VariableAccessData* calleeVariable;
+};
+
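+// How an add should speculate: not as int32 at all; as int32, truncating any
+// constant operand to int32; or as int32 outright.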
+enum AddSpeculationMode {
+ DontSpeculateInt32,
+ SpeculateInt32AndTruncateConstants,
+ SpeculateInt32
};
-//
+//
// === Graph ===
//
-// The dataflow graph is an ordered vector of nodes.
// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
-class Graph : public Vector<Node, 64> {
+class Graph : public virtual Scannable {
public:
- Graph(JSGlobalData& globalData, CodeBlock* codeBlock)
- : m_globalData(globalData)
- , m_codeBlock(codeBlock)
- , m_profiledBlock(codeBlock->alternative())
+ Graph(VM&, Plan&, LongLivedState&);
+ ~Graph();
+
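+    // Helpers for rewiring edges in place. The compareAndSwap() variants only
+    // apply the change if the edge still refers to the expected old value.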
+ void changeChild(Edge& edge, Node* newNode)
{
- ASSERT(m_profiledBlock);
+ edge.setNode(newNode);
}
- using Vector<Node, 64>::operator[];
- using Vector<Node, 64>::at;
-
- Node& operator[](Edge nodeUse) { return at(nodeUse.index()); }
- const Node& operator[](Edge nodeUse) const { return at(nodeUse.index()); }
-
- Node& at(Edge nodeUse) { return at(nodeUse.index()); }
- const Node& at(Edge nodeUse) const { return at(nodeUse.index()); }
-
- // Mark a node as being referenced.
- void ref(NodeIndex nodeIndex)
+ void changeEdge(Edge& edge, Edge newEdge)
{
- Node& node = at(nodeIndex);
- // If the value (before incrementing) was at refCount zero then we need to ref its children.
- if (node.ref())
- refChildren(nodeIndex);
+ edge = newEdge;
}
- void ref(Edge nodeUse)
+
+ void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
{
- ref(nodeUse.index());
+ if (edge.node() != oldNode)
+ return;
+ changeChild(edge, newNode);
}
- void deref(NodeIndex nodeIndex)
+ void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
{
- if (at(nodeIndex).deref())
- derefChildren(nodeIndex);
+ if (edge != oldEdge)
+ return;
+ changeEdge(edge, newEdge);
}
- void deref(Edge nodeUse)
- {
- deref(nodeUse.index());
+
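+    // Redirect this node's child edges to any replacements recorded in
+    // misc.replacement (see performSubstitutionForEdge() below).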
+ void performSubstitution(Node* node)
+ {
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
+ performSubstitutionForEdge(m_varArgChildren[childIdx]);
+ } else {
+ performSubstitutionForEdge(node->child1());
+ performSubstitutionForEdge(node->child2());
+ performSubstitutionForEdge(node->child3());
+ }
}
- void clearAndDerefChild1(Node& node)
+ void performSubstitutionForEdge(Edge& child)
{
- if (!node.child1())
+ // Check if this operand is actually unused.
+ if (!child)
return;
- deref(node.child1());
- node.children.child1() = Edge();
+
+ // Check if there is any replacement.
+ Node* replacement = child->misc.replacement;
+ if (!replacement)
+ return;
+
+ child.setNode(replacement);
+
+ // There is definitely a replacement. Assert that the replacement does not
+ // have a replacement.
+ ASSERT(!child->misc.replacement);
}
-
- void clearAndDerefChild2(Node& node)
+
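+    // Allocate a fresh node from the graph's node allocator and seed its
+    // prediction with the given speculated type.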
+ template<typename... Params>
+ Node* addNode(SpeculatedType type, Params... params)
{
- if (!node.child2())
- return;
- deref(node.child2());
- node.children.child2() = Edge();
+ Node* node = new (m_allocator) Node(params...);
+ node->predict(type);
+ return node;
}
- void clearAndDerefChild3(Node& node)
+ void dethread();
+
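+    // Constant-folding a GetLocal breaks local-variable threading, so we must
+    // dethread (fall back to LoadStore form) first.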
+ void convertToConstant(Node* node, unsigned constantNumber)
{
- if (!node.child3())
- return;
- deref(node.child3());
- node.children.child3() = Edge();
+ if (node->op() == GetLocal)
+ dethread();
+ else
+ ASSERT(!node->hasVariableAccessData(*this));
+ node->convertToConstant(constantNumber);
+ }
+
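+    // Return the constant-pool index for this value, lazily adding the constant
+    // (and a deferred write barrier, via the plan) if the code block lacks one.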
+ unsigned constantRegisterForConstant(JSValue value)
+ {
+ unsigned constantRegister;
+ if (!m_codeBlock->findConstant(value, constantRegister)) {
+ constantRegister = m_codeBlock->addConstantLazily();
+ initializeLazyWriteBarrierForConstant(
+ m_plan.writeBarriers,
+ m_codeBlock->constants()[constantRegister],
+ m_codeBlock,
+ constantRegister,
+ m_codeBlock->ownerExecutable(),
+ value);
+ }
+ return constantRegister;
+ }
+
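+    // Object constants are referenced weakly rather than via the constant pool,
+    // so the compiled code does not keep them alive.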
+ void convertToConstant(Node* node, JSValue value)
+ {
+ if (value.isObject())
+ node->convertToWeakConstant(value.asCell());
+ else
+ convertToConstant(node, constantRegisterForConstant(value));
}
// CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
- void dump();
- void dump(NodeIndex);
+ void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
+ enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
+ void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
+ void dump(PrintStream&, Edge);
+ void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
+ static int amountOfNodeWhiteSpace(Node*);
+ static void printNodeWhiteSpace(PrintStream&, Node*);
// Dump the code origin of the given node as a diff from the code origin of the
- // preceding node.
- void dumpCodeOrigin(NodeIndex, NodeIndex);
+ // preceding node. Returns true if anything was printed.
+ bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext*);
- BlockIndex blockIndexForBytecodeOffset(Vector<BlockIndex>& blocks, unsigned bytecodeBegin);
-
- PredictedType getJSConstantPrediction(Node& node)
+ SpeculatedType getJSConstantSpeculation(Node* node)
{
- return predictionFromValue(node.valueOfJSConstant(m_codeBlock));
+ return speculationFromValue(node->valueOfJSConstant(m_codeBlock));
}
- bool addShouldSpeculateInteger(Node& add)
+ AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
{
- ASSERT(add.op() == ValueAdd || add.op() == ArithAdd || add.op() == ArithSub);
+ ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);
+
+ RareCaseProfilingSource source = add->sourceFor(pass);
- Node& left = at(add.child1());
- Node& right = at(add.child2());
+ Node* left = add->child1().node();
+ Node* right = add->child2().node();
- if (left.hasConstant())
- return addImmediateShouldSpeculateInteger(add, right, left);
- if (right.hasConstant())
- return addImmediateShouldSpeculateInteger(add, left, right);
+ if (left->hasConstant())
+ return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, left, source);
+ if (right->hasConstant())
+ return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, right, source);
+
+ return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
+ }
+
+ AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
+ {
+ return addSpeculationMode(
+ add,
+ add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
+ add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
+ pass);
+ }
+
+ AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
+ {
+ return addSpeculationMode(
+ add,
+ add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
+ add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
+ pass);
+ }
+
+ AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
+ {
+ if (add->op() == ValueAdd)
+ return valueAddSpeculationMode(add, pass);
+
+ return arithAddSpeculationMode(add, pass);
+ }
+
+ bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
+ {
+ return addSpeculationMode(add, pass) != DontSpeculateInt32;
+ }
+
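+    // Int52 ("machine int") speculation is only available when enableInt52()
+    // (i.e. on 64-bit), and only sensible if we haven't already exited here
+    // due to Int52 overflow.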
+ bool addShouldSpeculateMachineInt(Node* add)
+ {
+ if (!enableInt52())
+ return false;
- return Node::shouldSpeculateInteger(left, right) && add.canSpeculateInteger();
+ Node* left = add->child1().node();
+ Node* right = add->child2().node();
+
+        // ValueAdd and ArithAdd/ArithSub use the same operand criterion here.
+        bool speculation = Node::shouldSpeculateMachineInt(left, right);
+
+ return speculation && !hasExitSite(add, Int52Overflow);
}
- bool negateShouldSpeculateInteger(Node& negate)
+ bool mulShouldSpeculateInt32(Node* mul, PredictionPass pass)
{
- ASSERT(negate.op() == ArithNegate);
- return at(negate.child1()).shouldSpeculateInteger() && negate.canSpeculateInteger();
+ ASSERT(mul->op() == ArithMul);
+
+ Node* left = mul->child1().node();
+ Node* right = mul->child2().node();
+
+ return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
+ && mul->canSpeculateInt32(mul->sourceFor(pass));
}
- bool addShouldSpeculateInteger(NodeIndex nodeIndex)
+ bool mulShouldSpeculateMachineInt(Node* mul, PredictionPass pass)
{
- return addShouldSpeculateInteger(at(nodeIndex));
+ ASSERT(mul->op() == ArithMul);
+
+ if (!enableInt52())
+ return false;
+
+ Node* left = mul->child1().node();
+ Node* right = mul->child2().node();
+
+ return Node::shouldSpeculateMachineInt(left, right)
+ && mul->canSpeculateInt52(pass)
+ && !hasExitSite(mul, Int52Overflow);
+ }
+
+ bool negateShouldSpeculateInt32(Node* negate, PredictionPass pass)
+ {
+ ASSERT(negate->op() == ArithNegate);
+ return negate->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
+ && negate->canSpeculateInt32(pass);
+ }
+
+ bool negateShouldSpeculateMachineInt(Node* negate, PredictionPass pass)
+ {
+ ASSERT(negate->op() == ArithNegate);
+ if (!enableInt52())
+ return false;
+ return negate->child1()->shouldSpeculateMachineInt()
+ && !hasExitSite(negate, Int52Overflow)
+ && negate->canSpeculateInt52(pass);
+ }
+
+ VirtualRegister bytecodeRegisterForArgument(CodeOrigin codeOrigin, int argument)
+ {
+ return VirtualRegister(
+ codeOrigin.inlineCallFrame->stackOffset +
+ baselineCodeBlockFor(codeOrigin)->argumentIndexAfterCapture(argument));
}
// Helper methods to check nodes for constants.
- bool isConstant(NodeIndex nodeIndex)
+ bool isConstant(Node* node)
{
- return at(nodeIndex).hasConstant();
+ return node->hasConstant();
}
- bool isJSConstant(NodeIndex nodeIndex)
+ bool isJSConstant(Node* node)
{
- return at(nodeIndex).hasConstant();
+ return node->hasConstant();
}
- bool isInt32Constant(NodeIndex nodeIndex)
+ bool isInt32Constant(Node* node)
{
- return at(nodeIndex).isInt32Constant(m_codeBlock);
+ return node->isInt32Constant(m_codeBlock);
}
- bool isDoubleConstant(NodeIndex nodeIndex)
+ bool isDoubleConstant(Node* node)
{
- return at(nodeIndex).isDoubleConstant(m_codeBlock);
+ return node->isDoubleConstant(m_codeBlock);
}
- bool isNumberConstant(NodeIndex nodeIndex)
+ bool isNumberConstant(Node* node)
{
- return at(nodeIndex).isNumberConstant(m_codeBlock);
+ return node->isNumberConstant(m_codeBlock);
+ }
+ bool isMachineIntConstant(Node* node)
+ {
+ return node->isMachineIntConstant(m_codeBlock);
+ }
+ bool isBooleanConstant(Node* node)
+ {
+ return node->isBooleanConstant(m_codeBlock);
+ }
+ bool isCellConstant(Node* node)
+ {
+ if (!isJSConstant(node))
+ return false;
+ JSValue value = valueOfJSConstant(node);
+ return value.isCell() && !!value;
}
- bool isBooleanConstant(NodeIndex nodeIndex)
+ bool isFunctionConstant(Node* node)
{
- return at(nodeIndex).isBooleanConstant(m_codeBlock);
+ if (!isJSConstant(node))
+ return false;
+ if (!getJSFunction(valueOfJSConstant(node)))
+ return false;
+ return true;
}
- bool isFunctionConstant(NodeIndex nodeIndex)
+ bool isInternalFunctionConstant(Node* node)
{
- if (!isJSConstant(nodeIndex))
+ if (!isJSConstant(node))
+ return false;
+ JSValue value = valueOfJSConstant(node);
+ if (!value.isCell() || !value)
return false;
- if (!getJSFunction(valueOfJSConstant(nodeIndex)))
+ JSCell* cell = value.asCell();
+ if (!cell->inherits(InternalFunction::info()))
return false;
return true;
}
    // Helper methods to get constant values from nodes.
- JSValue valueOfJSConstant(NodeIndex nodeIndex)
+ JSValue valueOfJSConstant(Node* node)
{
- return at(nodeIndex).valueOfJSConstant(m_codeBlock);
+ return node->valueOfJSConstant(m_codeBlock);
}
- int32_t valueOfInt32Constant(NodeIndex nodeIndex)
+ int32_t valueOfInt32Constant(Node* node)
{
- return valueOfJSConstant(nodeIndex).asInt32();
+ JSValue value = valueOfJSConstant(node);
+ if (!value.isInt32()) {
+ dataLog("Value isn't int32: ", value, "\n");
+ dump();
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ return value.asInt32();
}
- double valueOfNumberConstant(NodeIndex nodeIndex)
+ double valueOfNumberConstant(Node* node)
{
- return valueOfJSConstant(nodeIndex).asNumber();
+ return valueOfJSConstant(node).asNumber();
}
- bool valueOfBooleanConstant(NodeIndex nodeIndex)
+ bool valueOfBooleanConstant(Node* node)
{
- return valueOfJSConstant(nodeIndex).asBoolean();
+ return valueOfJSConstant(node).asBoolean();
}
- JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex)
+ JSFunction* valueOfFunctionConstant(Node* node)
{
- JSCell* function = getJSFunction(valueOfJSConstant(nodeIndex));
+ JSCell* function = getJSFunction(valueOfJSConstant(node));
ASSERT(function);
return jsCast<JSFunction*>(function);
}
    static const char* opName(NodeType);
- // This is O(n), and should only be used for verbose dumps.
- const char* nameOfVariableAccessData(VariableAccessData*);
-
- void predictArgumentTypes();
-
StructureSet* addStructureSet(const StructureSet& structureSet)
{
ASSERT(structureSet.size());
        m_structureSet.append(structureSet);
        return &m_structureSet.last();
}
+ JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
+ {
+ return m_codeBlock->globalObjectFor(codeOrigin);
+ }
+
+ JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
+ {
+ JSGlobalObject* object = globalObjectFor(codeOrigin);
+ return jsCast<JSObject*>(object->methodTable()->toThis(object, object->globalExec(), NotStrictMode));
+ }
+
+ ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_codeBlock->ownerExecutable();
+
+ return inlineCallFrame->executable.get();
+ }
+
+ ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
+ {
+ return executableFor(codeOrigin.inlineCallFrame);
+ }
+
+ CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock;
+ return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ }
+
CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
{
return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
}
- ValueProfile* valueProfileFor(NodeIndex nodeIndex)
+ bool isStrictModeFor(CodeOrigin codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return m_codeBlock->isStrictMode();
+ return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
+ }
+
+ ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
+ {
+ return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
+ }
+
+ bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
+ {
+ return m_plan.watchpoints.isStillValid(
+ globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint());
+ }
+
+ bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
+ {
+ return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
+ }
+
+ bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
+ {
+ return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
+ }
+
+ bool hasExitSite(Node* node, ExitKind exitKind)
+ {
+ return hasExitSite(node->origin.semantic, exitKind);
+ }
+
+ bool usesArguments(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock->usesArguments();
+
+ return baselineCodeBlockForInlineCallFrame(inlineCallFrame)->usesArguments();
+ }
+
+ VirtualRegister argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock->argumentsRegister();
+
+ return VirtualRegister(baselineCodeBlockForInlineCallFrame(
+ inlineCallFrame)->argumentsRegister().offset() +
+ inlineCallFrame->stackOffset);
+ }
+
+ VirtualRegister argumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return argumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ VirtualRegister machineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_codeBlock->argumentsRegister();
+
+ return inlineCallFrame->argumentsRegister;
+ }
+
+ VirtualRegister machineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return machineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ VirtualRegister uncheckedArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock->uncheckedArgumentsRegister();
+
+ CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ if (!codeBlock->usesArguments())
+ return VirtualRegister();
+
+ return VirtualRegister(codeBlock->argumentsRegister().offset() +
+ inlineCallFrame->stackOffset);
+ }
+
+ VirtualRegister uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return uncheckedArgumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ VirtualRegister activationRegister()
+ {
+ return m_profiledBlock->activationRegister();
+ }
+
+ VirtualRegister uncheckedActivationRegister()
+ {
+ return m_profiledBlock->uncheckedActivationRegister();
+ }
+
+ VirtualRegister machineActivationRegister()
+ {
+ return m_profiledBlock->activationRegister();
+ }
+
+ VirtualRegister uncheckedMachineActivationRegister()
+ {
+ return m_profiledBlock->uncheckedActivationRegister();
+ }
+
+ ValueProfile* valueProfileFor(Node* node)
{
- if (nodeIndex == NoNode)
+ if (!node)
return 0;
- Node& node = at(nodeIndex);
- CodeBlock* profiledBlock = baselineCodeBlockFor(node.codeOrigin);
+ CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
+
+ if (node->op() == GetArgument)
+ return profiledBlock->valueProfileForArgument(node->local().toArgument());
- if (node.hasLocal()) {
- if (!operandIsArgument(node.local()))
+ if (node->hasLocal(*this)) {
+ if (m_form == SSA)
return 0;
- int argument = operandToArgument(node.local());
- if (node.variableAccessData() != at(m_arguments[argument]).variableAccessData())
+ if (!node->local().isArgument())
+ return 0;
+ int argument = node->local().toArgument();
+ if (node->variableAccessData() != m_arguments[argument]->variableAccessData())
return 0;
return profiledBlock->valueProfileForArgument(argument);
}
- if (node.hasHeapPrediction())
- return profiledBlock->valueProfileForBytecodeOffset(node.codeOrigin.bytecodeIndexForValueProfile());
+ if (node->hasHeapPrediction())
+ return profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
return 0;
}
- MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(NodeIndex nodeIndex)
+ MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* node)
{
- if (nodeIndex == NoNode)
+ if (!node)
return MethodOfGettingAValueProfile();
- Node& node = at(nodeIndex);
- CodeBlock* profiledBlock = baselineCodeBlockFor(node.codeOrigin);
+ CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
- if (node.op() == GetLocal) {
+ if (node->op() == GetLocal) {
return MethodOfGettingAValueProfile::fromLazyOperand(
profiledBlock,
LazyOperandValueProfileKey(
- node.codeOrigin.bytecodeIndex, node.local()));
+ node->origin.semantic.bytecodeIndex, node->local()));
}
- return MethodOfGettingAValueProfile(valueProfileFor(nodeIndex));
+ return MethodOfGettingAValueProfile(valueProfileFor(node));
}
- bool needsActivation() const
+ bool usesArguments() const
{
-#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
- return true;
-#else
- return m_codeBlock->needsFullScopeChain() && m_codeBlock->codeType() != GlobalCode;
-#endif
+ return m_codeBlock->usesArguments();
}
- // Pass an argument index. Currently it's ignored, but that's somewhat
- // of a bug.
- bool argumentIsCaptured(int) const
+ BlockIndex numBlocks() const { return m_blocks.size(); }
+ BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
+ BasicBlock* lastBlock() const { return block(numBlocks() - 1); }
+
+ void appendBlock(PassRefPtr<BasicBlock> basicBlock)
{
- return needsActivation();
+ basicBlock->index = m_blocks.size();
+ m_blocks.append(basicBlock);
}
- bool localIsCaptured(int operand) const
+
+ void killBlock(BlockIndex blockIndex)
{
-#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
- return operand < m_codeBlock->m_numVars;
-#else
- return operand < m_codeBlock->m_numCapturedVars;
-#endif
+ m_blocks[blockIndex].clear();
+ }
+
+ void killBlock(BasicBlock* basicBlock)
+ {
+ killBlock(basicBlock->index);
}
- bool isCaptured(int operand) const
+ void killBlockAndItsContents(BasicBlock*);
+
+ void killUnreachableBlocks();
+
+ bool isPredictedNumerical(Node* node)
{
- if (operandIsArgument(operand))
- return argumentIsCaptured(operandToArgument(operand));
- return localIsCaptured(operand);
+ return isNumerical(node->child1().useKind()) && isNumerical(node->child2().useKind());
+ }
+
+ // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing.
+ // It really means that it will not clobber the entire world. It's still up to you to
+ // carefully consider things like:
+ // - PutByVal definitely changes the array it stores to, and may even change its length.
+ // - PutByOffset definitely changes the object it stores to.
+ // - and so on.
+ bool byValIsPure(Node* node)
+ {
+ switch (node->arrayMode().type()) {
+ case Array::Generic:
+ return false;
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ case Array::ArrayStorage:
+ return !node->arrayMode().isOutOfBounds();
+ case Array::SlowPutArrayStorage:
+ return !node->arrayMode().mayStoreToHole();
+ case Array::String:
+ return node->op() == GetByVal && node->arrayMode().isInBounds();
+        case Array::Arguments:
+            return node->op() == GetByVal; // Reads of arguments are pure; stores are not.
+ default:
+ return true;
+ }
}
- bool isCaptured(VirtualRegister virtualRegister) const
+
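+    // Conservative aliasing query: nodes flagged NodeClobbersWorld always
+    // clobber; nodes flagged NodeMightClobber are refined case by case below.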
+ bool clobbersWorld(Node* node)
+ {
+ if (node->flags() & NodeClobbersWorld)
+ return true;
+ if (!(node->flags() & NodeMightClobber))
+ return false;
+ switch (node->op()) {
+ case GetByVal:
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias:
+ return !byValIsPure(node);
+ case ToString:
+ switch (node->child1().useKind()) {
+ case StringObjectUse:
+ case StringOrStringObjectUse:
+ return false;
+ case CellUse:
+ case UntypedUse:
+ return true;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return true;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
+ }
+ }
+
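+    // CFG reachability maintenance for when blocks are added or removed.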
+ void determineReachability();
+ void resetReachability();
+
+ void resetExitStates();
+
+ unsigned varArgNumChildren(Node* node)
{
- return isCaptured(static_cast<int>(virtualRegister));
+ ASSERT(node->flags() & NodeHasVarArgs);
+ return node->numChildren();
}
- JSGlobalData& m_globalData;
+ unsigned numChildren(Node* node)
+ {
+ if (node->flags() & NodeHasVarArgs)
+ return varArgNumChildren(node);
+ return AdjacencyList::Size;
+ }
+
+ Edge& varArgChild(Node* node, unsigned index)
+ {
+ ASSERT(node->flags() & NodeHasVarArgs);
+ return m_varArgChildren[node->firstChild() + index];
+ }
+
+ Edge& child(Node* node, unsigned index)
+ {
+ if (node->flags() & NodeHasVarArgs)
+ return varArgChild(node, index);
+ return node->children.child(index);
+ }
+
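+    // Variable voting: a vote passes through value conversions down to the
+    // underlying GetLocal's VariableAccessData.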
+ void voteNode(Node* node, unsigned ballot, float weight = 1)
+ {
+ switch (node->op()) {
+ case ValueToInt32:
+ case UInt32ToNumber:
+ node = node->child1().node();
+ break;
+ default:
+ break;
+ }
+
+ if (node->op() == GetLocal)
+ node->variableAccessData()->vote(ballot, weight);
+ }
+
+ void voteNode(Edge edge, unsigned ballot, float weight = 1)
+ {
+ voteNode(edge.node(), ballot, weight);
+ }
+
+ void voteChildren(Node* node, unsigned ballot, float weight = 1)
+ {
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild();
+ childIdx < node->firstChild() + node->numChildren();
+ childIdx++) {
+ if (!!m_varArgChildren[childIdx])
+ voteNode(m_varArgChildren[childIdx], ballot, weight);
+ }
+ return;
+ }
+
+ if (!node->child1())
+ return;
+ voteNode(node->child1(), ballot, weight);
+ if (!node->child2())
+ return;
+ voteNode(node->child2(), ballot, weight);
+ if (!node->child3())
+ return;
+ voteNode(node->child3(), ballot, weight);
+ }
+
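+    // Replace every use of oldThing with newThing in all edges of the block,
+    // starting at startIndexInBlock.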
+ template<typename T> // T = Node* or Edge
+ void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
+ {
+ for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
+ Node* node = block[indexInBlock];
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
+ if (!!m_varArgChildren[childIdx])
+ compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
+ }
+ continue;
+ }
+ if (!node->child1())
+ continue;
+ compareAndSwap(node->children.child1(), oldThing, newThing);
+ if (!node->child2())
+ continue;
+ compareAndSwap(node->children.child2(), oldThing, newThing);
+ if (!node->child3())
+ continue;
+ compareAndSwap(node->children.child3(), oldThing, newThing);
+ }
+ }
+
+ // Use this if you introduce a new GetLocal and you know that you introduced it *before*
+ // any GetLocals in the basic block.
+ // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
+ // introduced anywhere in the basic block.
+ void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);
+
+ void invalidateCFG();
+
+ void clearFlagsOnAllNodes(NodeFlags);
+
+ void clearReplacements();
+ void initializeNodeOwners();
+
+ void getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result);
+
+ Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
+
+ DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
+ DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
+ DesiredStructureChains& chains() { return m_plan.chains; }
+
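+    // Bytecode liveness queries; results are kept per code block in
+    // m_bytecodeLiveness.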
+ FullBytecodeLiveness& livenessFor(CodeBlock*);
+ FullBytecodeLiveness& livenessFor(InlineCallFrame*);
+ bool isLiveInBytecode(VirtualRegister, CodeOrigin);
+
+ unsigned frameRegisterCount();
+ unsigned stackPointerOffset();
+ unsigned requiredRegisterCountForExit();
+ unsigned requiredRegisterCountForExecutionAndExit();
+
+ JSActivation* tryGetActivation(Node*);
+ WriteBarrierBase<Unknown>* tryGetRegisters(Node*);
+
+ JSArrayBufferView* tryGetFoldableView(Node*);
+ JSArrayBufferView* tryGetFoldableView(Node*, ArrayMode);
+ JSArrayBufferView* tryGetFoldableViewForChild1(Node*);
+
+ virtual void visitChildren(SlotVisitor&) override;
+
+ VM& m_vm;
+ Plan& m_plan;
CodeBlock* m_codeBlock;
CodeBlock* m_profiledBlock;
+
+ NodeAllocator& m_allocator;
- Vector< OwnPtr<BasicBlock> , 8> m_blocks;
+ Operands<AbstractValue> m_mustHandleAbstractValues;
+
+ Vector< RefPtr<BasicBlock> , 8> m_blocks;
Vector<Edge, 16> m_varArgChildren;
Vector<StorageAccessData> m_storageAccessData;
- Vector<ResolveGlobalData> m_resolveGlobalData;
- Vector<NodeIndex, 8> m_arguments;
+ Vector<Node*, 8> m_arguments;
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
SegmentedVector<StructureSet, 16> m_structureSet;
SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
- BitVector m_preservedVars;
+ SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
+ Bag<BranchData> m_branchData;
+ Bag<SwitchData> m_switchData;
+ Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
+ Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
+ Vector<InlineVariableData, 4> m_inlineVariableData;
+ HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
+ bool m_hasArguments;
+ HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
+ BitVector m_lazyVars;
+ Dominators m_dominators;
+ NaturalLoops m_naturalLoops;
unsigned m_localVars;
+ unsigned m_nextMachineLocal;
unsigned m_parameterSlots;
+ int m_machineCaptureStart;
+ std::unique_ptr<SlowArgument[]> m_slowArguments;
+
+#if USE(JSVALUE32_64)
+ std::unordered_map<int64_t, double*> m_doubleConstantsMap;
+ std::unique_ptr<Bag<double>> m_doubleConstants;
+#endif
+
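+    // Compilation-pipeline state: progress of the optimization fixpoint, the
+    // graph's current form (e.g. LoadStore, ThreadedCPS, or SSA), and
+    // unification/ref-count status.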
+ OptimizationFixpointState m_fixpointState;
+ GraphForm m_form;
+ UnificationState m_unificationState;
+ RefCountState m_refCountState;
private:
- bool addImmediateShouldSpeculateInteger(Node& add, Node& variable, Node& immediate)
+ void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);
+ void addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock*);
+
+ AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate, RareCaseProfilingSource source)
{
- ASSERT(immediate.hasConstant());
+ ASSERT(immediate->hasConstant());
- JSValue immediateValue = immediate.valueOfJSConstant(m_codeBlock);
- if (!immediateValue.isNumber())
- return false;
+ JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
+ if (!immediateValue.isNumber() && !immediateValue.isBoolean())
+ return DontSpeculateInt32;
- if (!variable.shouldSpeculateInteger())
- return false;
+ if (!variableShouldSpeculateInt32)
+ return DontSpeculateInt32;
- if (immediateValue.isInt32())
- return add.canSpeculateInteger();
+ if (immediateValue.isInt32() || immediateValue.isBoolean())
+ return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;
double doubleImmediate = immediateValue.asDouble();
const double twoToThe48 = 281474976710656.0;
if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
- return false;
+ return DontSpeculateInt32;
- return nodeCanTruncateInteger(add.arithNodeFlags());
- }
-
- // When a node's refCount goes from 0 to 1, it must (logically) recursively ref all of its children, and vice versa.
- void refChildren(NodeIndex);
- void derefChildren(NodeIndex);
-};
-
-class GetBytecodeBeginForBlock {
-public:
- GetBytecodeBeginForBlock(Graph& graph)
- : m_graph(graph)
- {
+ return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
}
-
- unsigned operator()(BlockIndex* blockIndex) const
- {
- return m_graph.m_blocks[*blockIndex]->bytecodeBegin;
- }
-
-private:
- Graph& m_graph;
};
-inline BlockIndex Graph::blockIndexForBytecodeOffset(Vector<BlockIndex>& linkingTargets, unsigned bytecodeBegin)
-{
- return *WTF::binarySearchWithFunctor<BlockIndex, unsigned>(linkingTargets.begin(), linkingTargets.size(), bytecodeBegin, WTF::KeyMustBePresentInArray, GetBytecodeBeginForBlock(*this));
-}
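+// Invoke thingToDo(node, edge) on each child edge of the node, whether the
+// children live inline in the node's AdjacencyList or out-of-line in the
+// graph's m_varArgChildren. Example (with a hypothetical callback):
+//     DFG_NODE_DO_TO_CHILDREN(graph, node, countEdge);
+// where countEdge is any function-like name taking (Node*, Edge).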
+#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
+ Node* _node = (node); \
+ if (_node->flags() & NodeHasVarArgs) { \
+ for (unsigned _childIdx = _node->firstChild(); \
+ _childIdx < _node->firstChild() + _node->numChildren(); \
+ _childIdx++) { \
+ if (!!(graph).m_varArgChildren[_childIdx]) \
+ thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
+ } \
+ } else { \
+ if (!_node->child1()) { \
+ ASSERT( \
+ !_node->child2() \
+ && !_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child1()); \
+ \
+ if (!_node->child2()) { \
+ ASSERT(!_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child2()); \
+ \
+ if (!_node->child3()) \
+ break; \
+ thingToDo(_node, _node->child3()); \
+ } \
+ } while (false)
} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)