2 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "AssemblyHelpers.h"
32 #include "BytecodeLivenessAnalysisInlines.h"
33 #include "CodeBlock.h"
34 #include "DFGArgumentPosition.h"
35 #include "DFGBasicBlock.h"
36 #include "DFGDominators.h"
37 #include "DFGFrozenValue.h"
38 #include "DFGLongLivedState.h"
39 #include "DFGNaturalLoops.h"
41 #include "DFGNodeAllocator.h"
43 #include "DFGPrePostNumbering.h"
44 #include "DFGScannable.h"
45 #include "FullBytecodeLiveness.h"
47 #include "MethodOfGettingAValueProfile.h"
48 #include <unordered_map>
49 #include <wtf/BitVector.h>
50 #include <wtf/HashMap.h>
51 #include <wtf/Vector.h>
52 #include <wtf/StdLibExtras.h>
// Applies thingToDo(node, edge) to every non-null child edge of the given node.
// Handles both the var-args case (children stored in graph.m_varArgChildren) and
// the fixed case (child1/child2/child3, stopping at the first empty slot).
// NOTE(review): reconstructed from a mangled extraction; the missing lines
// (loop increment, break statements, else branch) follow the identical logic in
// Graph::performSubstitution/voteChildren below — confirm against upstream.
#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do {            \
        Node* _node = (node);                                           \
        if (_node->flags() & NodeHasVarArgs) {                          \
            for (unsigned _childIdx = _node->firstChild();              \
                _childIdx < _node->firstChild() + _node->numChildren(); \
                _childIdx++) {                                          \
                if (!!(graph).m_varArgChildren[_childIdx])              \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
            }                                                           \
        } else {                                                        \
            /* Fixed children are packed: an empty child1 implies no children at all. */ \
            if (!_node->child1()) {                                     \
                ASSERT(                                                 \
                    !_node->child2()                                    \
                    && !_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child1());                          \
                                                                        \
            if (!_node->child2()) {                                     \
                ASSERT(!_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child2());                          \
                                                                        \
            if (!_node->child3())                                       \
                break;                                                  \
            thingToDo(_node, _node->child3());                          \
        }                                                               \
    } while (false)
// Like ASSERT, but on failure routes through Graph::handleAssertionFailure so the
// crash report includes a dump of the graph and the offending node.
// NOTE(review): the guard line was missing from the extraction; reconstructed as
// the conventional "if (!!(assertion)) break;" — confirm against upstream.
#define DFG_ASSERT(graph, node, assertion) do {                         \
        if (!!(assertion))                                              \
            break;                                                      \
        (graph).handleAssertionFailure(                                 \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
    } while (false)

// Unconditional crash with graph/node context; 'reason' is a string describing why.
#define DFG_CRASH(graph, node, reason) do {                             \
        (graph).handleAssertionFailure(                                 \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \
    } while (false)
103 struct InlineVariableData
{
104 InlineCallFrame
* inlineCallFrame
;
105 unsigned argumentPositionStart
;
106 VariableAccessData
* calleeVariable
;
// Result of the add/mul speculation queries below: either don't speculate Int32,
// speculate Int32 while truncating double-typed integer constants (e.g. 42.0),
// or speculate Int32 outright. The enumerators other than
// SpeculateInt32AndTruncateConstants were missing from the extraction but are
// required by the methods below that return them.
enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};
118 // The order may be significant for nodes with side-effects (property accesses, value conversions).
119 // Nodes that are 'dead' remain in the vector with refCount 0.
120 class Graph
: public virtual Scannable
{
122 Graph(VM
&, Plan
&, LongLivedState
&);
125 void changeChild(Edge
& edge
, Node
* newNode
)
127 edge
.setNode(newNode
);
130 void changeEdge(Edge
& edge
, Edge newEdge
)
135 void compareAndSwap(Edge
& edge
, Node
* oldNode
, Node
* newNode
)
137 if (edge
.node() != oldNode
)
139 changeChild(edge
, newNode
);
142 void compareAndSwap(Edge
& edge
, Edge oldEdge
, Edge newEdge
)
146 changeEdge(edge
, newEdge
);
149 void performSubstitution(Node
* node
)
151 if (node
->flags() & NodeHasVarArgs
) {
152 for (unsigned childIdx
= node
->firstChild(); childIdx
< node
->firstChild() + node
->numChildren(); childIdx
++)
153 performSubstitutionForEdge(m_varArgChildren
[childIdx
]);
155 performSubstitutionForEdge(node
->child1());
156 performSubstitutionForEdge(node
->child2());
157 performSubstitutionForEdge(node
->child3());
161 void performSubstitutionForEdge(Edge
& child
)
163 // Check if this operand is actually unused.
167 // Check if there is any replacement.
168 Node
* replacement
= child
->replacement();
172 child
.setNode(replacement
);
174 // There is definitely a replacement. Assert that the replacement does not
175 // have a replacement.
176 ASSERT(!child
->replacement());
179 template<typename
... Params
>
180 Node
* addNode(SpeculatedType type
, Params
... params
)
182 Node
* node
= new (m_allocator
) Node(params
...);
189 FrozenValue
* freeze(JSValue
); // We use weak freezing by default.
190 FrozenValue
* freezeStrong(JSValue
); // Shorthand for freeze(value)->strengthenTo(StrongValue).
192 void convertToConstant(Node
* node
, FrozenValue
* value
);
193 void convertToConstant(Node
* node
, JSValue value
);
194 void convertToStrongConstant(Node
* node
, JSValue value
);
196 StructureRegistrationResult
registerStructure(Structure
* structure
);
197 void assertIsRegistered(Structure
* structure
);
199 // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
200 void dump(PrintStream
& = WTF::dataFile(), DumpContext
* = 0);
202 bool terminalsAreValid();
204 enum PhiNodeDumpMode
{ DumpLivePhisOnly
, DumpAllPhis
};
205 void dumpBlockHeader(PrintStream
&, const char* prefix
, BasicBlock
*, PhiNodeDumpMode
, DumpContext
*);
206 void dump(PrintStream
&, Edge
);
207 void dump(PrintStream
&, const char* prefix
, Node
*, DumpContext
* = 0);
208 static int amountOfNodeWhiteSpace(Node
*);
209 static void printNodeWhiteSpace(PrintStream
&, Node
*);
211 // Dump the code origin of the given node as a diff from the code origin of the
212 // preceding node. Returns true if anything was printed.
213 bool dumpCodeOrigin(PrintStream
&, const char* prefix
, Node
* previousNode
, Node
* currentNode
, DumpContext
*);
215 AddSpeculationMode
addSpeculationMode(Node
* add
, bool leftShouldSpeculateInt32
, bool rightShouldSpeculateInt32
, PredictionPass pass
)
217 ASSERT(add
->op() == ValueAdd
|| add
->op() == ArithAdd
|| add
->op() == ArithSub
);
219 RareCaseProfilingSource source
= add
->sourceFor(pass
);
221 Node
* left
= add
->child1().node();
222 Node
* right
= add
->child2().node();
224 if (left
->hasConstant())
225 return addImmediateShouldSpeculateInt32(add
, rightShouldSpeculateInt32
, right
, left
, source
);
226 if (right
->hasConstant())
227 return addImmediateShouldSpeculateInt32(add
, leftShouldSpeculateInt32
, left
, right
, source
);
229 return (leftShouldSpeculateInt32
&& rightShouldSpeculateInt32
&& add
->canSpeculateInt32(source
)) ? SpeculateInt32
: DontSpeculateInt32
;
232 AddSpeculationMode
valueAddSpeculationMode(Node
* add
, PredictionPass pass
)
234 return addSpeculationMode(
236 add
->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
237 add
->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
241 AddSpeculationMode
arithAddSpeculationMode(Node
* add
, PredictionPass pass
)
243 return addSpeculationMode(
245 add
->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
246 add
->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
250 AddSpeculationMode
addSpeculationMode(Node
* add
, PredictionPass pass
)
252 if (add
->op() == ValueAdd
)
253 return valueAddSpeculationMode(add
, pass
);
255 return arithAddSpeculationMode(add
, pass
);
258 bool addShouldSpeculateInt32(Node
* add
, PredictionPass pass
)
260 return addSpeculationMode(add
, pass
) != DontSpeculateInt32
;
263 bool addShouldSpeculateMachineInt(Node
* add
)
268 Node
* left
= add
->child1().node();
269 Node
* right
= add
->child2().node();
271 bool speculation
= Node::shouldSpeculateMachineInt(left
, right
);
272 return speculation
&& !hasExitSite(add
, Int52Overflow
);
275 bool mulShouldSpeculateInt32(Node
* mul
, PredictionPass pass
)
277 ASSERT(mul
->op() == ArithMul
);
279 Node
* left
= mul
->child1().node();
280 Node
* right
= mul
->child2().node();
282 return Node::shouldSpeculateInt32OrBooleanForArithmetic(left
, right
)
283 && mul
->canSpeculateInt32(mul
->sourceFor(pass
));
286 bool mulShouldSpeculateMachineInt(Node
* mul
, PredictionPass pass
)
288 ASSERT(mul
->op() == ArithMul
);
293 Node
* left
= mul
->child1().node();
294 Node
* right
= mul
->child2().node();
296 return Node::shouldSpeculateMachineInt(left
, right
)
297 && mul
->canSpeculateInt52(pass
)
298 && !hasExitSite(mul
, Int52Overflow
);
301 bool negateShouldSpeculateInt32(Node
* negate
, PredictionPass pass
)
303 ASSERT(negate
->op() == ArithNegate
);
304 return negate
->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
305 && negate
->canSpeculateInt32(pass
);
308 bool negateShouldSpeculateMachineInt(Node
* negate
, PredictionPass pass
)
310 ASSERT(negate
->op() == ArithNegate
);
313 return negate
->child1()->shouldSpeculateMachineInt()
314 && !hasExitSite(negate
, Int52Overflow
)
315 && negate
->canSpeculateInt52(pass
);
318 bool roundShouldSpeculateInt32(Node
* arithRound
, PredictionPass pass
)
320 ASSERT(arithRound
->op() == ArithRound
);
321 return arithRound
->canSpeculateInt32(pass
) && !hasExitSite(arithRound
->origin
.semantic
, Overflow
) && !hasExitSite(arithRound
->origin
.semantic
, NegativeZero
);
324 static const char *opName(NodeType
);
326 StructureSet
* addStructureSet(const StructureSet
& structureSet
)
328 ASSERT(structureSet
.size());
329 m_structureSet
.append(structureSet
);
330 return &m_structureSet
.last();
333 JSGlobalObject
* globalObjectFor(CodeOrigin codeOrigin
)
335 return m_codeBlock
->globalObjectFor(codeOrigin
);
338 JSObject
* globalThisObjectFor(CodeOrigin codeOrigin
)
340 JSGlobalObject
* object
= globalObjectFor(codeOrigin
);
341 return jsCast
<JSObject
*>(object
->methodTable()->toThis(object
, object
->globalExec(), NotStrictMode
));
344 ScriptExecutable
* executableFor(InlineCallFrame
* inlineCallFrame
)
346 if (!inlineCallFrame
)
347 return m_codeBlock
->ownerExecutable();
349 return inlineCallFrame
->executable
.get();
352 ScriptExecutable
* executableFor(const CodeOrigin
& codeOrigin
)
354 return executableFor(codeOrigin
.inlineCallFrame
);
357 CodeBlock
* baselineCodeBlockFor(InlineCallFrame
* inlineCallFrame
)
359 if (!inlineCallFrame
)
360 return m_profiledBlock
;
361 return baselineCodeBlockForInlineCallFrame(inlineCallFrame
);
364 CodeBlock
* baselineCodeBlockFor(const CodeOrigin
& codeOrigin
)
366 return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin
, m_profiledBlock
);
369 SymbolTable
* symbolTableFor(InlineCallFrame
* inlineCallFrame
)
371 return baselineCodeBlockFor(inlineCallFrame
)->symbolTable();
374 SymbolTable
* symbolTableFor(const CodeOrigin
& codeOrigin
)
376 return symbolTableFor(codeOrigin
.inlineCallFrame
);
379 bool isStrictModeFor(CodeOrigin codeOrigin
)
381 if (!codeOrigin
.inlineCallFrame
)
382 return m_codeBlock
->isStrictMode();
383 return jsCast
<FunctionExecutable
*>(codeOrigin
.inlineCallFrame
->executable
.get())->isStrictMode();
386 ECMAMode
ecmaModeFor(CodeOrigin codeOrigin
)
388 return isStrictModeFor(codeOrigin
) ? StrictMode
: NotStrictMode
;
391 bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin
& codeOrigin
)
393 return globalObjectFor(codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid();
396 bool hasGlobalExitSite(const CodeOrigin
& codeOrigin
, ExitKind exitKind
)
398 return baselineCodeBlockFor(codeOrigin
)->hasExitSite(FrequentExitSite(exitKind
));
401 bool hasExitSite(const CodeOrigin
& codeOrigin
, ExitKind exitKind
)
403 return baselineCodeBlockFor(codeOrigin
)->hasExitSite(FrequentExitSite(codeOrigin
.bytecodeIndex
, exitKind
));
406 bool hasExitSite(Node
* node
, ExitKind exitKind
)
408 return hasExitSite(node
->origin
.semantic
, exitKind
);
411 VirtualRegister
activationRegister()
413 return m_profiledBlock
->activationRegister();
416 VirtualRegister
uncheckedActivationRegister()
418 return m_profiledBlock
->uncheckedActivationRegister();
421 VirtualRegister
machineActivationRegister()
423 return m_profiledBlock
->activationRegister();
426 VirtualRegister
uncheckedMachineActivationRegister()
428 return m_profiledBlock
->uncheckedActivationRegister();
431 ValueProfile
* valueProfileFor(Node
*);
432 MethodOfGettingAValueProfile
methodOfGettingAValueProfileFor(Node
*);
434 BlockIndex
numBlocks() const { return m_blocks
.size(); }
435 BasicBlock
* block(BlockIndex blockIndex
) const { return m_blocks
[blockIndex
].get(); }
436 BasicBlock
* lastBlock() const { return block(numBlocks() - 1); }
438 void appendBlock(PassRefPtr
<BasicBlock
> basicBlock
)
440 basicBlock
->index
= m_blocks
.size();
441 m_blocks
.append(basicBlock
);
444 void killBlock(BlockIndex blockIndex
)
446 m_blocks
[blockIndex
] = nullptr;
449 void killBlock(BasicBlock
* basicBlock
)
451 killBlock(basicBlock
->index
);
454 void killBlockAndItsContents(BasicBlock
*);
456 void killUnreachableBlocks();
458 void determineReachability();
459 void resetReachability();
461 void computeRefCounts();
463 unsigned varArgNumChildren(Node
* node
)
465 ASSERT(node
->flags() & NodeHasVarArgs
);
466 return node
->numChildren();
469 unsigned numChildren(Node
* node
)
471 if (node
->flags() & NodeHasVarArgs
)
472 return varArgNumChildren(node
);
473 return AdjacencyList::Size
;
476 Edge
& varArgChild(Node
* node
, unsigned index
)
478 ASSERT(node
->flags() & NodeHasVarArgs
);
479 return m_varArgChildren
[node
->firstChild() + index
];
482 Edge
& child(Node
* node
, unsigned index
)
484 if (node
->flags() & NodeHasVarArgs
)
485 return varArgChild(node
, index
);
486 return node
->children
.child(index
);
489 void voteNode(Node
* node
, unsigned ballot
, float weight
= 1)
491 switch (node
->op()) {
494 node
= node
->child1().node();
500 if (node
->op() == GetLocal
)
501 node
->variableAccessData()->vote(ballot
, weight
);
504 void voteNode(Edge edge
, unsigned ballot
, float weight
= 1)
506 voteNode(edge
.node(), ballot
, weight
);
509 void voteChildren(Node
* node
, unsigned ballot
, float weight
= 1)
511 if (node
->flags() & NodeHasVarArgs
) {
512 for (unsigned childIdx
= node
->firstChild();
513 childIdx
< node
->firstChild() + node
->numChildren();
515 if (!!m_varArgChildren
[childIdx
])
516 voteNode(m_varArgChildren
[childIdx
], ballot
, weight
);
523 voteNode(node
->child1(), ballot
, weight
);
526 voteNode(node
->child2(), ballot
, weight
);
529 voteNode(node
->child3(), ballot
, weight
);
532 template<typename T
> // T = Node* or Edge
533 void substitute(BasicBlock
& block
, unsigned startIndexInBlock
, T oldThing
, T newThing
)
535 for (unsigned indexInBlock
= startIndexInBlock
; indexInBlock
< block
.size(); ++indexInBlock
) {
536 Node
* node
= block
[indexInBlock
];
537 if (node
->flags() & NodeHasVarArgs
) {
538 for (unsigned childIdx
= node
->firstChild(); childIdx
< node
->firstChild() + node
->numChildren(); ++childIdx
) {
539 if (!!m_varArgChildren
[childIdx
])
540 compareAndSwap(m_varArgChildren
[childIdx
], oldThing
, newThing
);
546 compareAndSwap(node
->children
.child1(), oldThing
, newThing
);
549 compareAndSwap(node
->children
.child2(), oldThing
, newThing
);
552 compareAndSwap(node
->children
.child3(), oldThing
, newThing
);
556 // Use this if you introduce a new GetLocal and you know that you introduced it *before*
557 // any GetLocals in the basic block.
558 // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
559 // introduced anywhere in the basic block.
560 void substituteGetLocal(BasicBlock
& block
, unsigned startIndexInBlock
, VariableAccessData
* variableAccessData
, Node
* newGetLocal
);
562 void invalidateCFG();
564 void clearFlagsOnAllNodes(NodeFlags
);
566 void clearReplacements();
568 void initializeNodeOwners();
570 BlockList
blocksInPreOrder();
571 BlockList
blocksInPostOrder();
573 class NaturalBlockIterable
{
575 NaturalBlockIterable()
580 NaturalBlockIterable(Graph
& graph
)
593 iterator(Graph
& graph
, BlockIndex index
)
595 , m_index(findNext(index
))
599 BasicBlock
*operator*()
601 return m_graph
->block(m_index
);
604 iterator
& operator++()
606 m_index
= findNext(m_index
+ 1);
610 bool operator==(const iterator
& other
) const
612 return m_index
== other
.m_index
;
615 bool operator!=(const iterator
& other
) const
617 return !(*this == other
);
621 BlockIndex
findNext(BlockIndex index
)
623 while (index
< m_graph
->numBlocks() && !m_graph
->block(index
))
634 return iterator(*m_graph
, 0);
639 return iterator(*m_graph
, m_graph
->numBlocks());
646 NaturalBlockIterable
blocksInNaturalOrder()
648 return NaturalBlockIterable(*this);
651 template<typename ChildFunctor
>
652 void doToChildrenWithNode(Node
* node
, const ChildFunctor
& functor
)
654 DFG_NODE_DO_TO_CHILDREN(*this, node
, functor
);
657 template<typename ChildFunctor
>
658 void doToChildren(Node
* node
, const ChildFunctor
& functor
)
660 doToChildrenWithNode(
662 [&functor
] (Node
*, Edge
& edge
) {
667 bool uses(Node
* node
, Node
* child
)
670 doToChildren(node
, [&] (Edge edge
) { result
|= edge
== child
; });
674 Profiler::Compilation
* compilation() { return m_plan
.compilation
.get(); }
676 DesiredIdentifiers
& identifiers() { return m_plan
.identifiers
; }
677 DesiredWatchpoints
& watchpoints() { return m_plan
.watchpoints
; }
679 FullBytecodeLiveness
& livenessFor(CodeBlock
*);
680 FullBytecodeLiveness
& livenessFor(InlineCallFrame
*);
682 // Quickly query if a single local is live at the given point. This is faster than calling
683 // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
684 // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
685 bool isLiveInBytecode(VirtualRegister
, CodeOrigin
);
687 // Quickly get all of the non-argument locals live at the given point. This doesn't give you
688 // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
689 // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
690 template<typename Functor
>
691 void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin
, const Functor
& functor
)
693 // Support for not redundantly reporting arguments. Necessary because in case of a varargs
694 // call, only the callee knows that arguments are live while in the case of a non-varargs
695 // call, both callee and caller will see the variables live.
696 VirtualRegister exclusionStart
;
697 VirtualRegister exclusionEnd
;
700 InlineCallFrame
* inlineCallFrame
= codeOrigin
.inlineCallFrame
;
701 VirtualRegister
stackOffset(inlineCallFrame
? inlineCallFrame
->stackOffset
: 0);
703 if (inlineCallFrame
) {
704 if (inlineCallFrame
->isClosureCall
)
705 functor(stackOffset
+ JSStack::Callee
);
706 if (inlineCallFrame
->isVarargs())
707 functor(stackOffset
+ JSStack::ArgumentCount
);
710 CodeBlock
* codeBlock
= baselineCodeBlockFor(inlineCallFrame
);
711 FullBytecodeLiveness
& fullLiveness
= livenessFor(codeBlock
);
712 const FastBitVector
& liveness
= fullLiveness
.getLiveness(codeOrigin
.bytecodeIndex
);
713 for (unsigned relativeLocal
= codeBlock
->m_numCalleeRegisters
; relativeLocal
--;) {
714 VirtualRegister reg
= stackOffset
+ virtualRegisterForLocal(relativeLocal
);
716 // Don't report if our callee already reported.
717 if (reg
>= exclusionStart
&& reg
< exclusionEnd
)
720 if (liveness
.get(relativeLocal
))
724 if (!inlineCallFrame
)
727 // Arguments are always live. This would be redundant if it wasn't for our
728 // op_call_varargs inlining. See the comment above.
729 exclusionStart
= stackOffset
+ CallFrame::argumentOffsetIncludingThis(0);
730 exclusionEnd
= stackOffset
+ CallFrame::argumentOffsetIncludingThis(inlineCallFrame
->arguments
.size());
732 // We will always have a "this" argument and exclusionStart should be a smaller stack
733 // offset than exclusionEnd.
734 ASSERT(exclusionStart
< exclusionEnd
);
736 for (VirtualRegister reg
= exclusionStart
; reg
< exclusionEnd
; reg
+= 1)
739 codeOrigin
= inlineCallFrame
->caller
;
743 // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
744 // you want to compare two sets of live locals from two different CodeOrigins.
745 BitVector
localsLiveInBytecode(CodeOrigin
);
747 // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
748 // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
749 template<typename Functor
>
750 void forAllLiveInBytecode(CodeOrigin codeOrigin
, const Functor
& functor
)
752 forAllLocalsLiveInBytecode(codeOrigin
, functor
);
754 // Report all arguments as being live.
755 for (unsigned argument
= block(0)->variablesAtHead
.numberOfArguments(); argument
--;)
756 functor(virtualRegisterForArgument(argument
));
759 BytecodeKills
& killsFor(CodeBlock
*);
760 BytecodeKills
& killsFor(InlineCallFrame
*);
762 unsigned frameRegisterCount();
763 unsigned stackPointerOffset();
764 unsigned requiredRegisterCountForExit();
765 unsigned requiredRegisterCountForExecutionAndExit();
767 JSValue
tryGetConstantProperty(JSValue base
, const StructureSet
&, PropertyOffset
);
768 JSValue
tryGetConstantProperty(JSValue base
, Structure
*, PropertyOffset
);
769 JSValue
tryGetConstantProperty(JSValue base
, const StructureAbstractValue
&, PropertyOffset
);
770 JSValue
tryGetConstantProperty(const AbstractValue
&, PropertyOffset
);
772 JSValue
tryGetConstantClosureVar(JSValue base
, ScopeOffset
);
773 JSValue
tryGetConstantClosureVar(const AbstractValue
&, ScopeOffset
);
774 JSValue
tryGetConstantClosureVar(Node
*, ScopeOffset
);
776 JSArrayBufferView
* tryGetFoldableView(JSValue
);
777 JSArrayBufferView
* tryGetFoldableView(JSValue
, ArrayMode arrayMode
);
779 void registerFrozenValues();
781 virtual void visitChildren(SlotVisitor
&) override
;
783 NO_RETURN_DUE_TO_CRASH
void handleAssertionFailure(
784 std::nullptr_t
, const char* file
, int line
, const char* function
,
785 const char* assertion
);
786 NO_RETURN_DUE_TO_CRASH
void handleAssertionFailure(
787 Node
*, const char* file
, int line
, const char* function
,
788 const char* assertion
);
789 NO_RETURN_DUE_TO_CRASH
void handleAssertionFailure(
790 BasicBlock
*, const char* file
, int line
, const char* function
,
791 const char* assertion
);
793 bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled
; }
797 CodeBlock
* m_codeBlock
;
798 CodeBlock
* m_profiledBlock
;
800 NodeAllocator
& m_allocator
;
802 Vector
< RefPtr
<BasicBlock
> , 8> m_blocks
;
803 Vector
<Edge
, 16> m_varArgChildren
;
805 HashMap
<EncodedJSValue
, FrozenValue
*, EncodedJSValueHash
, EncodedJSValueHashTraits
> m_frozenValueMap
;
806 Bag
<FrozenValue
> m_frozenValues
;
808 Vector
<uint32_t> m_uint32ValuesInUse
;
810 Bag
<StorageAccessData
> m_storageAccessData
;
812 // In CPS, this is all of the SetArgument nodes for the arguments in the machine code block
813 // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
816 // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that
817 // may have some speculation in the prologue and survived DCE. Note that to get the speculation
818 // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate
819 // even if the argument got killed. For example:
825 // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
826 // have a proven check for the edge to "x". So, we will not insert a Check node and we will
827 // kill the GetStack for "x". But, we must do the int check in the progolue, because that's the
828 // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
831 // valueOf: function() { do side effects; }
835 // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
837 Vector
<Node
*, 8> m_arguments
;
839 // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in.
840 Vector
<FlushFormat
> m_argumentFormats
;
842 SegmentedVector
<VariableAccessData
, 16> m_variableAccessData
;
843 SegmentedVector
<ArgumentPosition
, 8> m_argumentPositions
;
844 SegmentedVector
<StructureSet
, 16> m_structureSet
;
845 Bag
<Transition
> m_transitions
;
846 SegmentedVector
<NewArrayBufferData
, 4> m_newArrayBufferData
;
847 Bag
<BranchData
> m_branchData
;
848 Bag
<SwitchData
> m_switchData
;
849 Bag
<MultiGetByOffsetData
> m_multiGetByOffsetData
;
850 Bag
<MultiPutByOffsetData
> m_multiPutByOffsetData
;
851 Bag
<ObjectMaterializationData
> m_objectMaterializationData
;
852 Bag
<CallVarargsData
> m_callVarargsData
;
853 Bag
<LoadVarargsData
> m_loadVarargsData
;
854 Bag
<StackAccessData
> m_stackAccessData
;
855 Vector
<InlineVariableData
, 4> m_inlineVariableData
;
856 HashMap
<CodeBlock
*, std::unique_ptr
<FullBytecodeLiveness
>> m_bytecodeLiveness
;
857 HashMap
<CodeBlock
*, std::unique_ptr
<BytecodeKills
>> m_bytecodeKills
;
858 Dominators m_dominators
;
859 PrePostNumbering m_prePostNumbering
;
860 NaturalLoops m_naturalLoops
;
861 unsigned m_localVars
;
862 unsigned m_nextMachineLocal
;
863 unsigned m_parameterSlots
;
865 #if USE(JSVALUE32_64)
866 std::unordered_map
<int64_t, double*> m_doubleConstantsMap
;
867 std::unique_ptr
<Bag
<double>> m_doubleConstants
;
870 OptimizationFixpointState m_fixpointState
;
871 StructureRegistrationState m_structureRegistrationState
;
873 UnificationState m_unificationState
;
874 PlanStage m_planStage
{ PlanStage::Initial
};
875 RefCountState m_refCountState
;
876 bool m_hasDebuggerEnabled
;
879 void handleSuccessor(Vector
<BasicBlock
*, 16>& worklist
, BasicBlock
*, BasicBlock
* successor
);
881 AddSpeculationMode
addImmediateShouldSpeculateInt32(Node
* add
, bool variableShouldSpeculateInt32
, Node
* operand
, Node
*immediate
, RareCaseProfilingSource source
)
883 ASSERT(immediate
->hasConstant());
885 JSValue immediateValue
= immediate
->asJSValue();
886 if (!immediateValue
.isNumber() && !immediateValue
.isBoolean())
887 return DontSpeculateInt32
;
889 if (!variableShouldSpeculateInt32
)
890 return DontSpeculateInt32
;
892 // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
893 // In that case, we stay conservative unless the other operand was explicitly typed as integer.
894 NodeFlags operandResultType
= operand
->result();
895 if (operandResultType
!= NodeResultInt32
&& immediateValue
.isDouble())
896 return DontSpeculateInt32
;
898 if (immediateValue
.isBoolean() || jsNumber(immediateValue
.asNumber()).isInt32())
899 return add
->canSpeculateInt32(source
) ? SpeculateInt32
: DontSpeculateInt32
;
901 double doubleImmediate
= immediateValue
.asDouble();
902 const double twoToThe48
= 281474976710656.0;
903 if (doubleImmediate
< -twoToThe48
|| doubleImmediate
> twoToThe48
)
904 return DontSpeculateInt32
;
906 return bytecodeCanTruncateInteger(add
->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants
: DontSpeculateInt32
;
910 } } // namespace JSC::DFG