/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#if ENABLE(DFG_JIT)
-#include <RegisterFile.h>
-#include <dfg/DFGNode.h>
+#include "AssemblyHelpers.h"
+#include "CodeBlock.h"
+#include "DFGArgumentPosition.h"
+#include "DFGBasicBlock.h"
+#include "DFGDominators.h"
+#include "DFGLongLivedState.h"
+#include "DFGNaturalLoops.h"
+#include "DFGNode.h"
+#include "DFGNodeAllocator.h"
+#include "DFGPlan.h"
+#include "DFGScannable.h"
+#include "JSStack.h"
+#include "MethodOfGettingAValueProfile.h"
+#include <unordered_map>
+#include <wtf/BitVector.h>
+#include <wtf/HashMap.h>
#include <wtf/Vector.h>
#include <wtf/StdLibExtras.h>
namespace JSC {
class CodeBlock;
+class ExecState;
namespace DFG {
-// helper function to distinguish vars & temporaries from arguments.
-inline bool operandIsArgument(int operand) { return operand < 0; }
+struct StorageAccessData {
+ PropertyOffset offset;
+ unsigned identifierNumber;
+};
+
+struct InlineVariableData {
+ InlineCallFrame* inlineCallFrame;
+ unsigned argumentPositionStart;
+ VariableAccessData* calleeVariable;
+};
-typedef uint8_t PredictedType;
-static const PredictedType PredictNone = 0;
-static const PredictedType PredictCell = 0x01;
-static const PredictedType PredictArray = 0x03;
-static const PredictedType PredictInt32 = 0x04;
+enum AddSpeculationMode {
+ DontSpeculateInt32,
+ SpeculateInt32AndTruncateConstants,
+ SpeculateInt32
+};
-struct PredictionSlot {
+//
+// === Graph ===
+//
+// The dataflow graph is a set of basic blocks, each an ordered vector of nodes.
+// The order may be significant for nodes with side-effects (property accesses, value conversions).
+// Nodes that are 'dead' remain in the vector with refCount 0.
+class Graph : public virtual Scannable {
public:
- PredictionSlot()
- : m_value(PredictNone)
+ Graph(VM&, Plan&, LongLivedState&);
+ ~Graph();
+
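+    // Helpers for rewriting edges in place. The compareAndSwap() variants below
+    // only apply the change if the edge still points at the expected old value.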
+ void changeChild(Edge& edge, Node* newNode)
{
+ edge.setNode(newNode);
}
- PredictedType m_value;
-};
-
-typedef uint32_t BlockIndex;
-
-// For every local variable we track any existing get or set of the value.
-// We track the get so that these may be shared, and we track the set to
-// retrieve the current value, and to reference the final definition.
-struct VariableRecord {
- VariableRecord()
- : value(NoNode)
+
+ void changeEdge(Edge& edge, Edge newEdge)
{
+ edge = newEdge;
}
-
- NodeIndex value;
-};
-
-typedef Vector <BlockIndex, 2> PredecessorList;
-
-struct BasicBlock {
- BasicBlock(unsigned bytecodeBegin, NodeIndex begin, unsigned numArguments, unsigned numLocals)
- : bytecodeBegin(bytecodeBegin)
- , begin(begin)
- , end(NoNode)
- , m_arguments(numArguments)
- , m_locals(numLocals)
+
+ void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
+ {
+ if (edge.node() != oldNode)
+ return;
+ changeChild(edge, newNode);
+ }
+
+ void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
+ {
+ if (edge != oldEdge)
+ return;
+ changeEdge(edge, newEdge);
+ }
+
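+    // Redirect each of this node's child edges to its replacement node, if any.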
+ void performSubstitution(Node* node)
+ {
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
+ performSubstitutionForEdge(m_varArgChildren[childIdx]);
+ } else {
+ performSubstitutionForEdge(node->child1());
+ performSubstitutionForEdge(node->child2());
+ performSubstitutionForEdge(node->child3());
+ }
+ }
+
+ void performSubstitutionForEdge(Edge& child)
{
+ // Check if this operand is actually unused.
+ if (!child)
+ return;
+
+ // Check if there is any replacement.
+ Node* replacement = child->misc.replacement;
+ if (!replacement)
+ return;
+
+ child.setNode(replacement);
+
+ // There is definitely a replacement. Assert that the replacement does not
+ // have a replacement.
+ ASSERT(!child->misc.replacement);
+ }
+
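+    // Placement-allocate a new node from the graph's allocator and seed its prediction.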
+ template<typename... Params>
+ Node* addNode(SpeculatedType type, Params... params)
+ {
+ Node* node = new (m_allocator) Node(params...);
+ node->predict(type);
+ return node;
}
- static inline BlockIndex getBytecodeBegin(OwnPtr<BasicBlock>* block)
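+    // Drops the graph from ThreadedCPS form back to LoadStore form.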
+ void dethread();
+
+ void convertToConstant(Node* node, unsigned constantNumber)
+ {
+ if (node->op() == GetLocal)
+ dethread();
+ else
+ ASSERT(!node->hasVariableAccessData(*this));
+ node->convertToConstant(constantNumber);
+ }
+
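+    // Returns the constant-pool index for the given value, adding a new entry (with
+    // a lazily initialized write barrier) if the value is not already present.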
+ unsigned constantRegisterForConstant(JSValue value)
{
- return (*block)->bytecodeBegin;
+ unsigned constantRegister;
+ if (!m_codeBlock->findConstant(value, constantRegister)) {
+ constantRegister = m_codeBlock->addConstantLazily();
+ initializeLazyWriteBarrierForConstant(
+ m_plan.writeBarriers,
+ m_codeBlock->constants()[constantRegister],
+ m_codeBlock,
+ constantRegister,
+ m_codeBlock->ownerExecutable(),
+ value);
+ }
+ return constantRegister;
+ }
+
+ void convertToConstant(Node* node, JSValue value)
+ {
+ if (value.isObject())
+ node->convertToWeakConstant(value.asCell());
+ else
+ convertToConstant(node, constantRegisterForConstant(value));
}
- unsigned bytecodeBegin;
- NodeIndex begin;
- NodeIndex end;
+    // Dump the graph. The DumpContext is optional, but may allow additional information to be dumped.
+ void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
+ enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
+ void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
+ void dump(PrintStream&, Edge);
+ void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
+ static int amountOfNodeWhiteSpace(Node*);
+ static void printNodeWhiteSpace(PrintStream&, Node*);
- PredecessorList m_predecessors;
- Vector <VariableRecord, 8> m_arguments;
- Vector <VariableRecord, 16> m_locals;
-};
+ // Dump the code origin of the given node as a diff from the code origin of the
+ // preceding node. Returns true if anything was printed.
+ bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext*);
-//
-// === Graph ===
-//
-// The dataflow graph is an ordered vector of nodes.
-// The order may be significant for nodes with side-effects (property accesses, value conversions).
-// Nodes that are 'dead' remain in the vector with refCount 0.
-class Graph : public Vector<Node, 64> {
-public:
- Graph(unsigned numArguments, unsigned numVariables)
- : m_argumentPredictions(numArguments)
- , m_variablePredictions(numVariables)
+ SpeculatedType getJSConstantSpeculation(Node* node)
{
+ return speculationFromValue(node->valueOfJSConstant(m_codeBlock));
}
-
- // Mark a node as being referenced.
- void ref(NodeIndex nodeIndex)
+
+ AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
+ {
+ ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);
+
+ RareCaseProfilingSource source = add->sourceFor(pass);
+
+ Node* left = add->child1().node();
+ Node* right = add->child2().node();
+
+ if (left->hasConstant())
+ return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, left, source);
+ if (right->hasConstant())
+ return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, right, source);
+
+ return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
+ }
+
+ AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
+ {
+ return addSpeculationMode(
+ add,
+ add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
+ add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
+ pass);
+ }
+
+ AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
+ {
+ return addSpeculationMode(
+ add,
+ add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
+ add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
+ pass);
+ }
+
+ AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
+ {
+ if (add->op() == ValueAdd)
+ return valueAddSpeculationMode(add, pass);
+
+ return arithAddSpeculationMode(add, pass);
+ }
+
+ bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
{
- Node& node = at(nodeIndex);
- // If the value (before incrementing) was at refCount zero then we need to ref its children.
- if (node.ref())
- refChildren(nodeIndex);
+ return addSpeculationMode(add, pass) != DontSpeculateInt32;
}
+
+ bool addShouldSpeculateMachineInt(Node* add)
+ {
+ if (!enableInt52())
+ return false;
+
+ Node* left = add->child1().node();
+ Node* right = add->child2().node();
-#ifndef NDEBUG
- // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
- void dump(CodeBlock* = 0);
- void dump(NodeIndex, CodeBlock* = 0);
-#endif
+        // ValueAdd and ArithAdd/ArithSub currently use the same speculation check.
+        bool speculation = Node::shouldSpeculateMachineInt(left, right);
- BlockIndex blockIndexForBytecodeOffset(unsigned bytecodeBegin)
+ return speculation && !hasExitSite(add, Int52Overflow);
+ }
+
+ bool mulShouldSpeculateInt32(Node* mul, PredictionPass pass)
{
- OwnPtr<BasicBlock>* begin = m_blocks.begin();
- OwnPtr<BasicBlock>* block = binarySearch<OwnPtr<BasicBlock>, unsigned, BasicBlock::getBytecodeBegin>(begin, m_blocks.size(), bytecodeBegin);
- ASSERT(block >= m_blocks.begin() && block < m_blocks.end());
- return static_cast<BlockIndex>(block - begin);
+ ASSERT(mul->op() == ArithMul);
+
+ Node* left = mul->child1().node();
+ Node* right = mul->child2().node();
+
+ return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
+ && mul->canSpeculateInt32(mul->sourceFor(pass));
}
+
+ bool mulShouldSpeculateMachineInt(Node* mul, PredictionPass pass)
+ {
+ ASSERT(mul->op() == ArithMul);
+
+ if (!enableInt52())
+ return false;
+
+ Node* left = mul->child1().node();
+ Node* right = mul->child2().node();
- BasicBlock& blockForBytecodeOffset(unsigned bytecodeBegin)
+ return Node::shouldSpeculateMachineInt(left, right)
+ && mul->canSpeculateInt52(pass)
+ && !hasExitSite(mul, Int52Overflow);
+ }
+
+ bool negateShouldSpeculateInt32(Node* negate, PredictionPass pass)
{
- return *m_blocks[blockIndexForBytecodeOffset(bytecodeBegin)];
+ ASSERT(negate->op() == ArithNegate);
+ return negate->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
+ && negate->canSpeculateInt32(pass);
}
-
- void predict(int operand, PredictedType prediction)
+
+ bool negateShouldSpeculateMachineInt(Node* negate, PredictionPass pass)
+ {
+ ASSERT(negate->op() == ArithNegate);
+ if (!enableInt52())
+ return false;
+ return negate->child1()->shouldSpeculateMachineInt()
+ && !hasExitSite(negate, Int52Overflow)
+ && negate->canSpeculateInt52(pass);
+ }
+
+ VirtualRegister bytecodeRegisterForArgument(CodeOrigin codeOrigin, int argument)
+ {
+ return VirtualRegister(
+ codeOrigin.inlineCallFrame->stackOffset +
+ baselineCodeBlockFor(codeOrigin)->argumentIndexAfterCapture(argument));
+ }
+
+ // Helper methods to check nodes for constants.
+ bool isConstant(Node* node)
{
- if (operandIsArgument(operand)) {
- unsigned argument = operand + m_argumentPredictions.size() + RegisterFile::CallFrameHeaderSize;
- m_argumentPredictions[argument].m_value |= prediction;
- } else if ((unsigned)operand < m_variablePredictions.size())
- m_variablePredictions[operand].m_value |= prediction;
-
+ return node->hasConstant();
+ }
+ bool isJSConstant(Node* node)
+ {
+ return node->hasConstant();
+ }
+ bool isInt32Constant(Node* node)
+ {
+ return node->isInt32Constant(m_codeBlock);
+ }
+ bool isDoubleConstant(Node* node)
+ {
+ return node->isDoubleConstant(m_codeBlock);
+ }
+ bool isNumberConstant(Node* node)
+ {
+ return node->isNumberConstant(m_codeBlock);
+ }
+ bool isMachineIntConstant(Node* node)
+ {
+ return node->isMachineIntConstant(m_codeBlock);
+ }
+ bool isBooleanConstant(Node* node)
+ {
+ return node->isBooleanConstant(m_codeBlock);
+ }
+ bool isCellConstant(Node* node)
+ {
+ if (!isJSConstant(node))
+ return false;
+ JSValue value = valueOfJSConstant(node);
+ return value.isCell() && !!value;
+ }
+ bool isFunctionConstant(Node* node)
+ {
+ if (!isJSConstant(node))
+ return false;
+ if (!getJSFunction(valueOfJSConstant(node)))
+ return false;
+ return true;
+ }
+ bool isInternalFunctionConstant(Node* node)
+ {
+ if (!isJSConstant(node))
+ return false;
+ JSValue value = valueOfJSConstant(node);
+ if (!value.isCell() || !value)
+ return false;
+ JSCell* cell = value.asCell();
+ if (!cell->inherits(InternalFunction::info()))
+ return false;
+ return true;
+ }
+ // Helper methods get constant values from nodes.
+ JSValue valueOfJSConstant(Node* node)
+ {
+ return node->valueOfJSConstant(m_codeBlock);
+ }
+ int32_t valueOfInt32Constant(Node* node)
+ {
+ JSValue value = valueOfJSConstant(node);
+ if (!value.isInt32()) {
+ dataLog("Value isn't int32: ", value, "\n");
+ dump();
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ return value.asInt32();
+ }
+ double valueOfNumberConstant(Node* node)
+ {
+ return valueOfJSConstant(node).asNumber();
+ }
+ bool valueOfBooleanConstant(Node* node)
+ {
+ return valueOfJSConstant(node).asBoolean();
+ }
+ JSFunction* valueOfFunctionConstant(Node* node)
+ {
+ JSCell* function = getJSFunction(valueOfJSConstant(node));
+ ASSERT(function);
+ return jsCast<JSFunction*>(function);
}
- PredictedType getPrediction(int operand)
+    static const char* opName(NodeType);
+
+ StructureSet* addStructureSet(const StructureSet& structureSet)
+ {
+ ASSERT(structureSet.size());
+ m_structureSet.append(structureSet);
+ return &m_structureSet.last();
+ }
+
+ StructureTransitionData* addStructureTransitionData(const StructureTransitionData& structureTransitionData)
+ {
+ m_structureTransitionData.append(structureTransitionData);
+ return &m_structureTransitionData.last();
+ }
+
+ JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
{
- if (operandIsArgument(operand)) {
- unsigned argument = operand + m_argumentPredictions.size() + RegisterFile::CallFrameHeaderSize;
- return m_argumentPredictions[argument].m_value;
+ return m_codeBlock->globalObjectFor(codeOrigin);
+ }
+
+ JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
+ {
+ JSGlobalObject* object = globalObjectFor(codeOrigin);
+ return jsCast<JSObject*>(object->methodTable()->toThis(object, object->globalExec(), NotStrictMode));
+ }
+
+ ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_codeBlock->ownerExecutable();
+
+ return inlineCallFrame->executable.get();
+ }
+
+ ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
+ {
+ return executableFor(codeOrigin.inlineCallFrame);
+ }
+
+ CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock;
+ return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ }
+
+ CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
+ {
+ return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
+ }
+
+ bool isStrictModeFor(CodeOrigin codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return m_codeBlock->isStrictMode();
+ return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
+ }
+
+ ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
+ {
+ return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
+ }
+
+ bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
+ {
+ return m_plan.watchpoints.isStillValid(
+ globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint());
+ }
+
+ bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
+ {
+ return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
+ }
+
+ bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
+ {
+ return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
+ }
+
+ bool hasExitSite(Node* node, ExitKind exitKind)
+ {
+ return hasExitSite(node->origin.semantic, exitKind);
+ }
+
+ bool usesArguments(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock->usesArguments();
+
+ return baselineCodeBlockForInlineCallFrame(inlineCallFrame)->usesArguments();
+ }
+
+ VirtualRegister argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock->argumentsRegister();
+
+ return VirtualRegister(baselineCodeBlockForInlineCallFrame(
+ inlineCallFrame)->argumentsRegister().offset() +
+ inlineCallFrame->stackOffset);
+ }
+
+ VirtualRegister argumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return argumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ VirtualRegister machineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_codeBlock->argumentsRegister();
+
+ return inlineCallFrame->argumentsRegister;
+ }
+
+ VirtualRegister machineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return machineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ VirtualRegister uncheckedArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return m_profiledBlock->uncheckedArgumentsRegister();
+
+ CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ if (!codeBlock->usesArguments())
+ return VirtualRegister();
+
+ return VirtualRegister(codeBlock->argumentsRegister().offset() +
+ inlineCallFrame->stackOffset);
+ }
+
+ VirtualRegister uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return uncheckedArgumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ VirtualRegister activationRegister()
+ {
+ return m_profiledBlock->activationRegister();
+ }
+
+ VirtualRegister uncheckedActivationRegister()
+ {
+ return m_profiledBlock->uncheckedActivationRegister();
+ }
+
+    // The "machine" variants answer for the code block being compiled, rather than
+    // for the profiled (baseline) code block, mirroring machineArgumentsRegisterFor().
+    VirtualRegister machineActivationRegister()
+    {
+        return m_codeBlock->activationRegister();
+    }
+
+    VirtualRegister uncheckedMachineActivationRegister()
+    {
+        return m_codeBlock->uncheckedActivationRegister();
+    }
+
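+    // Locates the value profile in the baseline code block that corresponds to this
+    // node, if there is one.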
+ ValueProfile* valueProfileFor(Node* node)
+ {
+ if (!node)
+ return 0;
+
+ CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
+
+ if (node->op() == GetArgument)
+ return profiledBlock->valueProfileForArgument(node->local().toArgument());
+
+ if (node->hasLocal(*this)) {
+ if (m_form == SSA)
+ return 0;
+ if (!node->local().isArgument())
+ return 0;
+ int argument = node->local().toArgument();
+ if (node->variableAccessData() != m_arguments[argument]->variableAccessData())
+ return 0;
+ return profiledBlock->valueProfileForArgument(argument);
+ }
+
+ if (node->hasHeapPrediction())
+ return profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
+
+ return 0;
+ }
+
+ MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* node)
+ {
+ if (!node)
+ return MethodOfGettingAValueProfile();
+
+ CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
+
+ if (node->op() == GetLocal) {
+ return MethodOfGettingAValueProfile::fromLazyOperand(
+ profiledBlock,
+ LazyOperandValueProfileKey(
+ node->origin.semantic.bytecodeIndex, node->local()));
}
- if ((unsigned)operand < m_variablePredictions.size())
- return m_variablePredictions[operand].m_value;
- return PredictNone;
+
+ return MethodOfGettingAValueProfile(valueProfileFor(node));
+ }
+
+ bool usesArguments() const
+ {
+ return m_codeBlock->usesArguments();
}
+
+ BlockIndex numBlocks() const { return m_blocks.size(); }
+ BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
+ BasicBlock* lastBlock() const { return block(numBlocks() - 1); }
- Vector< OwnPtr<BasicBlock> , 8> m_blocks;
-private:
+ void appendBlock(PassRefPtr<BasicBlock> basicBlock)
+ {
+ basicBlock->index = m_blocks.size();
+ m_blocks.append(basicBlock);
+ }
+
+ void killBlock(BlockIndex blockIndex)
+ {
+ m_blocks[blockIndex].clear();
+ }
+
+ void killBlock(BasicBlock* basicBlock)
+ {
+ killBlock(basicBlock->index);
+ }
+
+ void killBlockAndItsContents(BasicBlock*);
+
+ void killUnreachableBlocks();
+
+ bool isPredictedNumerical(Node* node)
+ {
+ return isNumerical(node->child1().useKind()) && isNumerical(node->child2().useKind());
+ }
+
+ // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing.
+ // It really means that it will not clobber the entire world. It's still up to you to
+ // carefully consider things like:
+ // - PutByVal definitely changes the array it stores to, and may even change its length.
+ // - PutByOffset definitely changes the object it stores to.
+ // - and so on.
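+    // For example, an in-bounds GetByVal on Array::Int32 is pure in this sense,
+    // whereas any Array::Generic access may run arbitrary JS (getters, setters, etc.).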
+ bool byValIsPure(Node* node)
+ {
+ switch (node->arrayMode().type()) {
+ case Array::Generic:
+ return false;
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ case Array::ArrayStorage:
+ return !node->arrayMode().isOutOfBounds();
+ case Array::SlowPutArrayStorage:
+ return !node->arrayMode().mayStoreToHole();
+ case Array::String:
+ return node->op() == GetByVal && node->arrayMode().isInBounds();
+#if USE(JSVALUE32_64)
+ case Array::Arguments:
+            return node->op() == GetByVal;
+#endif // USE(JSVALUE32_64)
+ default:
+ return true;
+ }
+ }
+
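+    // Conservatively answers whether this node may clobber arbitrary state; clients
+    // such as CSE must assume the worst when this returns true.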
+ bool clobbersWorld(Node* node)
+ {
+ if (node->flags() & NodeClobbersWorld)
+ return true;
+ if (!(node->flags() & NodeMightClobber))
+ return false;
+ switch (node->op()) {
+ case GetByVal:
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias:
+ return !byValIsPure(node);
+ case ToString:
+ switch (node->child1().useKind()) {
+ case StringObjectUse:
+ case StringOrStringObjectUse:
+ return false;
+ case CellUse:
+ case UntypedUse:
+ return true;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return true;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
+ }
+ }
+
+ void determineReachability();
+ void resetReachability();
+
+ void resetExitStates();
+
+ unsigned varArgNumChildren(Node* node)
+ {
+ ASSERT(node->flags() & NodeHasVarArgs);
+ return node->numChildren();
+ }
+
+ unsigned numChildren(Node* node)
+ {
+ if (node->flags() & NodeHasVarArgs)
+ return varArgNumChildren(node);
+ return AdjacencyList::Size;
+ }
+
+ Edge& varArgChild(Node* node, unsigned index)
+ {
+ ASSERT(node->flags() & NodeHasVarArgs);
+ return m_varArgChildren[node->firstChild() + index];
+ }
+
+ Edge& child(Node* node, unsigned index)
+ {
+ if (node->flags() & NodeHasVarArgs)
+ return varArgChild(node, index);
+ return node->children.child(index);
+ }
+
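+    // Votes on the variable behind a node, looking through integer conversions so
+    // that the vote lands on the underlying GetLocal.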
+ void voteNode(Node* node, unsigned ballot, float weight = 1)
+ {
+ switch (node->op()) {
+ case ValueToInt32:
+ case UInt32ToNumber:
+ node = node->child1().node();
+ break;
+ default:
+ break;
+ }
+
+ if (node->op() == GetLocal)
+ node->variableAccessData()->vote(ballot, weight);
+ }
+
+ void voteNode(Edge edge, unsigned ballot, float weight = 1)
+ {
+ voteNode(edge.node(), ballot, weight);
+ }
+
+ void voteChildren(Node* node, unsigned ballot, float weight = 1)
+ {
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild();
+ childIdx < node->firstChild() + node->numChildren();
+ childIdx++) {
+ if (!!m_varArgChildren[childIdx])
+ voteNode(m_varArgChildren[childIdx], ballot, weight);
+ }
+ return;
+ }
+
+ if (!node->child1())
+ return;
+ voteNode(node->child1(), ballot, weight);
+ if (!node->child2())
+ return;
+ voteNode(node->child2(), ballot, weight);
+ if (!node->child3())
+ return;
+ voteNode(node->child3(), ballot, weight);
+ }
+
+ template<typename T> // T = Node* or Edge
+ void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
+ {
+ for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
+ Node* node = block[indexInBlock];
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
+ if (!!m_varArgChildren[childIdx])
+ compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
+ }
+ continue;
+ }
+ if (!node->child1())
+ continue;
+ compareAndSwap(node->children.child1(), oldThing, newThing);
+ if (!node->child2())
+ continue;
+ compareAndSwap(node->children.child2(), oldThing, newThing);
+ if (!node->child3())
+ continue;
+ compareAndSwap(node->children.child3(), oldThing, newThing);
+ }
+ }
+
+ // Use this if you introduce a new GetLocal and you know that you introduced it *before*
+ // any GetLocals in the basic block.
+ // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
+ // introduced anywhere in the basic block.
+ void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);
+
+ void invalidateCFG();
+
+ void clearFlagsOnAllNodes(NodeFlags);
+
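+    // Resets every node's replacement pointer so a fresh substitution pass can run.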
+ void clearReplacements();
+ void initializeNodeOwners();
+
+ void getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result);
+
+ Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
+
+ DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
+ DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
+ DesiredStructureChains& chains() { return m_plan.chains; }
+
+ FullBytecodeLiveness& livenessFor(CodeBlock*);
+ FullBytecodeLiveness& livenessFor(InlineCallFrame*);
+ bool isLiveInBytecode(VirtualRegister, CodeOrigin);
+
+ unsigned frameRegisterCount();
+ unsigned stackPointerOffset();
+ unsigned requiredRegisterCountForExit();
+ unsigned requiredRegisterCountForExecutionAndExit();
+
+ JSActivation* tryGetActivation(Node*);
+ WriteBarrierBase<Unknown>* tryGetRegisters(Node*);
+
+ JSArrayBufferView* tryGetFoldableView(Node*);
+ JSArrayBufferView* tryGetFoldableView(Node*, ArrayMode);
+ JSArrayBufferView* tryGetFoldableViewForChild1(Node*);
+
+ virtual void visitChildren(SlotVisitor&) override;
+
+ VM& m_vm;
+ Plan& m_plan;
+ CodeBlock* m_codeBlock;
+ CodeBlock* m_profiledBlock;
+
+ NodeAllocator& m_allocator;
- // When a node's refCount goes from 0 to 1, it must (logically) recursively ref all of its children, and vice versa.
- void refChildren(NodeIndex);
+ Operands<AbstractValue> m_mustHandleAbstractValues;
+
+    Vector<RefPtr<BasicBlock>, 8> m_blocks;
+ Vector<Edge, 16> m_varArgChildren;
+ Vector<StorageAccessData> m_storageAccessData;
+ Vector<Node*, 8> m_arguments;
+ SegmentedVector<VariableAccessData, 16> m_variableAccessData;
+ SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
+ SegmentedVector<StructureSet, 16> m_structureSet;
+ SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
+ SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
+ Bag<BranchData> m_branchData;
+ Bag<SwitchData> m_switchData;
+ Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
+ Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
+ Vector<InlineVariableData, 4> m_inlineVariableData;
+ HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
+ bool m_hasArguments;
+ HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
+ BitVector m_lazyVars;
+ Dominators m_dominators;
+ NaturalLoops m_naturalLoops;
+ unsigned m_localVars;
+ unsigned m_nextMachineLocal;
+ unsigned m_parameterSlots;
+ int m_machineCaptureStart;
+ std::unique_ptr<SlowArgument[]> m_slowArguments;
- Vector<PredictionSlot, 16> m_argumentPredictions;
- Vector<PredictionSlot, 16> m_variablePredictions;
+#if USE(JSVALUE32_64)
+ std::unordered_map<int64_t, double*> m_doubleConstantsMap;
+ std::unique_ptr<Bag<double>> m_doubleConstants;
+#endif
+
+ OptimizationFixpointState m_fixpointState;
+ GraphForm m_form;
+ UnificationState m_unificationState;
+ RefCountState m_refCountState;
+private:
+
+ void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);
+ void addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock*);
+
+ AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate, RareCaseProfilingSource source)
+ {
+ ASSERT(immediate->hasConstant());
+
+ JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
+ if (!immediateValue.isNumber() && !immediateValue.isBoolean())
+ return DontSpeculateInt32;
+
+ if (!variableShouldSpeculateInt32)
+ return DontSpeculateInt32;
+
+ if (immediateValue.isInt32() || immediateValue.isBoolean())
+ return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;
+
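+        // Only a double immediate of modest magnitude (|x| <= 2^48) is worth
+        // speculating on as a truncated int32.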
+ double doubleImmediate = immediateValue.asDouble();
+ const double twoToThe48 = 281474976710656.0;
+ if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
+ return DontSpeculateInt32;
+
+ return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
+ }
};
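+
+// Invokes thingToDo(node, edge) on every live child edge of the given node,
+// handling both fixed-arity and var-args children.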
+#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
+ Node* _node = (node); \
+ if (_node->flags() & NodeHasVarArgs) { \
+ for (unsigned _childIdx = _node->firstChild(); \
+ _childIdx < _node->firstChild() + _node->numChildren(); \
+ _childIdx++) { \
+ if (!!(graph).m_varArgChildren[_childIdx]) \
+ thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
+ } \
+ } else { \
+ if (!_node->child1()) { \
+ ASSERT( \
+ !_node->child2() \
+ && !_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child1()); \
+ \
+ if (!_node->child2()) { \
+ ASSERT(!_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child2()); \
+ \
+ if (!_node->child3()) \
+ break; \
+ thingToDo(_node, _node->child3()); \
+ } \
+ } while (false)
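+
+// Hypothetical usage sketch (DFG_COUNT_EDGE is illustrative, not part of the API):
+// count a node's live child edges with the macro above.
+//
+//     unsigned edgeCount = 0;
+//     #define DFG_COUNT_EDGE(node, edge) (++edgeCount)
+//     DFG_NODE_DO_TO_CHILDREN(graph, node, DFG_COUNT_EDGE);
+//     #undef DFG_COUNT_EDGE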
+
} } // namespace JSC::DFG
#endif