/*
 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "AssemblyHelpers.h"
32 #include "CodeBlock.h"
33 #include "DFGArgumentPosition.h"
34 #include "DFGBasicBlock.h"
35 #include "DFGDominators.h"
36 #include "DFGLongLivedState.h"
37 #include "DFGNaturalLoops.h"
39 #include "DFGNodeAllocator.h"
41 #include "DFGScannable.h"
43 #include "MethodOfGettingAValueProfile.h"
44 #include <unordered_map>
45 #include <wtf/BitVector.h>
46 #include <wtf/HashMap.h>
47 #include <wtf/Vector.h>
48 #include <wtf/StdLibExtras.h>
57 struct StorageAccessData
{
58 PropertyOffset offset
;
59 unsigned identifierNumber
;
62 struct InlineVariableData
{
63 InlineCallFrame
* inlineCallFrame
;
64 unsigned argumentPositionStart
;
65 VariableAccessData
* calleeVariable
;
68 enum AddSpeculationMode
{
70 SpeculateInt32AndTruncateConstants
,
77 // The order may be significant for nodes with side-effects (property accesses, value conversions).
78 // Nodes that are 'dead' remain in the vector with refCount 0.
79 class Graph
: public virtual Scannable
{
81 Graph(VM
&, Plan
&, LongLivedState
&);
84 void changeChild(Edge
& edge
, Node
* newNode
)
86 edge
.setNode(newNode
);
89 void changeEdge(Edge
& edge
, Edge newEdge
)
94 void compareAndSwap(Edge
& edge
, Node
* oldNode
, Node
* newNode
)
96 if (edge
.node() != oldNode
)
98 changeChild(edge
, newNode
);
101 void compareAndSwap(Edge
& edge
, Edge oldEdge
, Edge newEdge
)
105 changeEdge(edge
, newEdge
);
108 void performSubstitution(Node
* node
)
110 if (node
->flags() & NodeHasVarArgs
) {
111 for (unsigned childIdx
= node
->firstChild(); childIdx
< node
->firstChild() + node
->numChildren(); childIdx
++)
112 performSubstitutionForEdge(m_varArgChildren
[childIdx
]);
114 performSubstitutionForEdge(node
->child1());
115 performSubstitutionForEdge(node
->child2());
116 performSubstitutionForEdge(node
->child3());
120 void performSubstitutionForEdge(Edge
& child
)
122 // Check if this operand is actually unused.
126 // Check if there is any replacement.
127 Node
* replacement
= child
->misc
.replacement
;
131 child
.setNode(replacement
);
133 // There is definitely a replacement. Assert that the replacement does not
134 // have a replacement.
135 ASSERT(!child
->misc
.replacement
);
138 template<typename
... Params
>
139 Node
* addNode(SpeculatedType type
, Params
... params
)
141 Node
* node
= new (m_allocator
) Node(params
...);
148 void convertToConstant(Node
* node
, unsigned constantNumber
)
150 if (node
->op() == GetLocal
)
153 ASSERT(!node
->hasVariableAccessData(*this));
154 node
->convertToConstant(constantNumber
);
157 unsigned constantRegisterForConstant(JSValue value
)
159 unsigned constantRegister
;
160 if (!m_codeBlock
->findConstant(value
, constantRegister
)) {
161 constantRegister
= m_codeBlock
->addConstantLazily();
162 initializeLazyWriteBarrierForConstant(
163 m_plan
.writeBarriers
,
164 m_codeBlock
->constants()[constantRegister
],
167 m_codeBlock
->ownerExecutable(),
170 return constantRegister
;
173 void convertToConstant(Node
* node
, JSValue value
)
175 if (value
.isObject())
176 node
->convertToWeakConstant(value
.asCell());
178 convertToConstant(node
, constantRegisterForConstant(value
));
181 // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
182 void dump(PrintStream
& = WTF::dataFile(), DumpContext
* = 0);
183 enum PhiNodeDumpMode
{ DumpLivePhisOnly
, DumpAllPhis
};
184 void dumpBlockHeader(PrintStream
&, const char* prefix
, BasicBlock
*, PhiNodeDumpMode
, DumpContext
*);
185 void dump(PrintStream
&, Edge
);
186 void dump(PrintStream
&, const char* prefix
, Node
*, DumpContext
* = 0);
187 static int amountOfNodeWhiteSpace(Node
*);
188 static void printNodeWhiteSpace(PrintStream
&, Node
*);
190 // Dump the code origin of the given node as a diff from the code origin of the
191 // preceding node. Returns true if anything was printed.
192 bool dumpCodeOrigin(PrintStream
&, const char* prefix
, Node
* previousNode
, Node
* currentNode
, DumpContext
*);
194 SpeculatedType
getJSConstantSpeculation(Node
* node
)
196 return speculationFromValue(node
->valueOfJSConstant(m_codeBlock
));
199 AddSpeculationMode
addSpeculationMode(Node
* add
, bool leftShouldSpeculateInt32
, bool rightShouldSpeculateInt32
, PredictionPass pass
)
201 ASSERT(add
->op() == ValueAdd
|| add
->op() == ArithAdd
|| add
->op() == ArithSub
);
203 RareCaseProfilingSource source
= add
->sourceFor(pass
);
205 Node
* left
= add
->child1().node();
206 Node
* right
= add
->child2().node();
208 if (left
->hasConstant())
209 return addImmediateShouldSpeculateInt32(add
, rightShouldSpeculateInt32
, left
, source
);
210 if (right
->hasConstant())
211 return addImmediateShouldSpeculateInt32(add
, leftShouldSpeculateInt32
, right
, source
);
213 return (leftShouldSpeculateInt32
&& rightShouldSpeculateInt32
&& add
->canSpeculateInt32(source
)) ? SpeculateInt32
: DontSpeculateInt32
;
216 AddSpeculationMode
valueAddSpeculationMode(Node
* add
, PredictionPass pass
)
218 return addSpeculationMode(
220 add
->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
221 add
->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
225 AddSpeculationMode
arithAddSpeculationMode(Node
* add
, PredictionPass pass
)
227 return addSpeculationMode(
229 add
->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
230 add
->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
234 AddSpeculationMode
addSpeculationMode(Node
* add
, PredictionPass pass
)
236 if (add
->op() == ValueAdd
)
237 return valueAddSpeculationMode(add
, pass
);
239 return arithAddSpeculationMode(add
, pass
);
242 bool addShouldSpeculateInt32(Node
* add
, PredictionPass pass
)
244 return addSpeculationMode(add
, pass
) != DontSpeculateInt32
;
247 bool addShouldSpeculateMachineInt(Node
* add
)
252 Node
* left
= add
->child1().node();
253 Node
* right
= add
->child2().node();
256 if (add
->op() == ValueAdd
)
257 speculation
= Node::shouldSpeculateMachineInt(left
, right
);
259 speculation
= Node::shouldSpeculateMachineInt(left
, right
);
261 return speculation
&& !hasExitSite(add
, Int52Overflow
);
264 bool mulShouldSpeculateInt32(Node
* mul
, PredictionPass pass
)
266 ASSERT(mul
->op() == ArithMul
);
268 Node
* left
= mul
->child1().node();
269 Node
* right
= mul
->child2().node();
271 return Node::shouldSpeculateInt32OrBooleanForArithmetic(left
, right
)
272 && mul
->canSpeculateInt32(mul
->sourceFor(pass
));
275 bool mulShouldSpeculateMachineInt(Node
* mul
, PredictionPass pass
)
277 ASSERT(mul
->op() == ArithMul
);
282 Node
* left
= mul
->child1().node();
283 Node
* right
= mul
->child2().node();
285 return Node::shouldSpeculateMachineInt(left
, right
)
286 && mul
->canSpeculateInt52(pass
)
287 && !hasExitSite(mul
, Int52Overflow
);
290 bool negateShouldSpeculateInt32(Node
* negate
, PredictionPass pass
)
292 ASSERT(negate
->op() == ArithNegate
);
293 return negate
->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
294 && negate
->canSpeculateInt32(pass
);
297 bool negateShouldSpeculateMachineInt(Node
* negate
, PredictionPass pass
)
299 ASSERT(negate
->op() == ArithNegate
);
302 return negate
->child1()->shouldSpeculateMachineInt()
303 && !hasExitSite(negate
, Int52Overflow
)
304 && negate
->canSpeculateInt52(pass
);
307 VirtualRegister
bytecodeRegisterForArgument(CodeOrigin codeOrigin
, int argument
)
309 return VirtualRegister(
310 codeOrigin
.inlineCallFrame
->stackOffset
+
311 baselineCodeBlockFor(codeOrigin
)->argumentIndexAfterCapture(argument
));
314 // Helper methods to check nodes for constants.
315 bool isConstant(Node
* node
)
317 return node
->hasConstant();
319 bool isJSConstant(Node
* node
)
321 return node
->hasConstant();
323 bool isInt32Constant(Node
* node
)
325 return node
->isInt32Constant(m_codeBlock
);
327 bool isDoubleConstant(Node
* node
)
329 return node
->isDoubleConstant(m_codeBlock
);
331 bool isNumberConstant(Node
* node
)
333 return node
->isNumberConstant(m_codeBlock
);
335 bool isMachineIntConstant(Node
* node
)
337 return node
->isMachineIntConstant(m_codeBlock
);
339 bool isBooleanConstant(Node
* node
)
341 return node
->isBooleanConstant(m_codeBlock
);
343 bool isCellConstant(Node
* node
)
345 if (!isJSConstant(node
))
347 JSValue value
= valueOfJSConstant(node
);
348 return value
.isCell() && !!value
;
350 bool isFunctionConstant(Node
* node
)
352 if (!isJSConstant(node
))
354 if (!getJSFunction(valueOfJSConstant(node
)))
358 bool isInternalFunctionConstant(Node
* node
)
360 if (!isJSConstant(node
))
362 JSValue value
= valueOfJSConstant(node
);
363 if (!value
.isCell() || !value
)
365 JSCell
* cell
= value
.asCell();
366 if (!cell
->inherits(InternalFunction::info()))
370 // Helper methods get constant values from nodes.
371 JSValue
valueOfJSConstant(Node
* node
)
373 return node
->valueOfJSConstant(m_codeBlock
);
375 int32_t valueOfInt32Constant(Node
* node
)
377 JSValue value
= valueOfJSConstant(node
);
378 if (!value
.isInt32()) {
379 dataLog("Value isn't int32: ", value
, "\n");
381 RELEASE_ASSERT_NOT_REACHED();
383 return value
.asInt32();
385 double valueOfNumberConstant(Node
* node
)
387 return valueOfJSConstant(node
).asNumber();
389 bool valueOfBooleanConstant(Node
* node
)
391 return valueOfJSConstant(node
).asBoolean();
393 JSFunction
* valueOfFunctionConstant(Node
* node
)
395 JSCell
* function
= getJSFunction(valueOfJSConstant(node
));
397 return jsCast
<JSFunction
*>(function
);
400 static const char *opName(NodeType
);
402 StructureSet
* addStructureSet(const StructureSet
& structureSet
)
404 ASSERT(structureSet
.size());
405 m_structureSet
.append(structureSet
);
406 return &m_structureSet
.last();
409 StructureTransitionData
* addStructureTransitionData(const StructureTransitionData
& structureTransitionData
)
411 m_structureTransitionData
.append(structureTransitionData
);
412 return &m_structureTransitionData
.last();
415 JSGlobalObject
* globalObjectFor(CodeOrigin codeOrigin
)
417 return m_codeBlock
->globalObjectFor(codeOrigin
);
420 JSObject
* globalThisObjectFor(CodeOrigin codeOrigin
)
422 JSGlobalObject
* object
= globalObjectFor(codeOrigin
);
423 return jsCast
<JSObject
*>(object
->methodTable()->toThis(object
, object
->globalExec(), NotStrictMode
));
426 ScriptExecutable
* executableFor(InlineCallFrame
* inlineCallFrame
)
428 if (!inlineCallFrame
)
429 return m_codeBlock
->ownerExecutable();
431 return inlineCallFrame
->executable
.get();
434 ScriptExecutable
* executableFor(const CodeOrigin
& codeOrigin
)
436 return executableFor(codeOrigin
.inlineCallFrame
);
439 CodeBlock
* baselineCodeBlockFor(InlineCallFrame
* inlineCallFrame
)
441 if (!inlineCallFrame
)
442 return m_profiledBlock
;
443 return baselineCodeBlockForInlineCallFrame(inlineCallFrame
);
446 CodeBlock
* baselineCodeBlockFor(const CodeOrigin
& codeOrigin
)
448 return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin
, m_profiledBlock
);
451 bool isStrictModeFor(CodeOrigin codeOrigin
)
453 if (!codeOrigin
.inlineCallFrame
)
454 return m_codeBlock
->isStrictMode();
455 return jsCast
<FunctionExecutable
*>(codeOrigin
.inlineCallFrame
->executable
.get())->isStrictMode();
458 ECMAMode
ecmaModeFor(CodeOrigin codeOrigin
)
460 return isStrictModeFor(codeOrigin
) ? StrictMode
: NotStrictMode
;
463 bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin
& codeOrigin
)
465 return m_plan
.watchpoints
.isStillValid(
466 globalObjectFor(codeOrigin
)->masqueradesAsUndefinedWatchpoint());
469 bool hasGlobalExitSite(const CodeOrigin
& codeOrigin
, ExitKind exitKind
)
471 return baselineCodeBlockFor(codeOrigin
)->hasExitSite(FrequentExitSite(exitKind
));
474 bool hasExitSite(const CodeOrigin
& codeOrigin
, ExitKind exitKind
)
476 return baselineCodeBlockFor(codeOrigin
)->hasExitSite(FrequentExitSite(codeOrigin
.bytecodeIndex
, exitKind
));
479 bool hasExitSite(Node
* node
, ExitKind exitKind
)
481 return hasExitSite(node
->origin
.semantic
, exitKind
);
484 VirtualRegister
argumentsRegisterFor(InlineCallFrame
* inlineCallFrame
)
486 if (!inlineCallFrame
)
487 return m_profiledBlock
->argumentsRegister();
489 return VirtualRegister(baselineCodeBlockForInlineCallFrame(
490 inlineCallFrame
)->argumentsRegister().offset() +
491 inlineCallFrame
->stackOffset
);
494 VirtualRegister
argumentsRegisterFor(const CodeOrigin
& codeOrigin
)
496 return argumentsRegisterFor(codeOrigin
.inlineCallFrame
);
499 VirtualRegister
machineArgumentsRegisterFor(InlineCallFrame
* inlineCallFrame
)
501 if (!inlineCallFrame
)
502 return m_codeBlock
->argumentsRegister();
504 return inlineCallFrame
->argumentsRegister
;
507 VirtualRegister
machineArgumentsRegisterFor(const CodeOrigin
& codeOrigin
)
509 return machineArgumentsRegisterFor(codeOrigin
.inlineCallFrame
);
512 VirtualRegister
uncheckedArgumentsRegisterFor(InlineCallFrame
* inlineCallFrame
)
514 if (!inlineCallFrame
)
515 return m_profiledBlock
->uncheckedArgumentsRegister();
517 CodeBlock
* codeBlock
= baselineCodeBlockForInlineCallFrame(inlineCallFrame
);
518 if (!codeBlock
->usesArguments())
519 return VirtualRegister();
521 return VirtualRegister(codeBlock
->argumentsRegister().offset() +
522 inlineCallFrame
->stackOffset
);
525 VirtualRegister
uncheckedArgumentsRegisterFor(const CodeOrigin
& codeOrigin
)
527 return uncheckedArgumentsRegisterFor(codeOrigin
.inlineCallFrame
);
530 VirtualRegister
activationRegister()
532 return m_profiledBlock
->activationRegister();
535 VirtualRegister
uncheckedActivationRegister()
537 return m_profiledBlock
->uncheckedActivationRegister();
540 VirtualRegister
machineActivationRegister()
542 return m_profiledBlock
->activationRegister();
545 VirtualRegister
uncheckedMachineActivationRegister()
547 return m_profiledBlock
->uncheckedActivationRegister();
550 ValueProfile
* valueProfileFor(Node
* node
)
555 CodeBlock
* profiledBlock
= baselineCodeBlockFor(node
->origin
.semantic
);
557 if (node
->op() == GetArgument
)
558 return profiledBlock
->valueProfileForArgument(node
->local().toArgument());
560 if (node
->hasLocal(*this)) {
563 if (!node
->local().isArgument())
565 int argument
= node
->local().toArgument();
566 if (node
->variableAccessData() != m_arguments
[argument
]->variableAccessData())
568 return profiledBlock
->valueProfileForArgument(argument
);
571 if (node
->hasHeapPrediction())
572 return profiledBlock
->valueProfileForBytecodeOffset(node
->origin
.semantic
.bytecodeIndex
);
577 MethodOfGettingAValueProfile
methodOfGettingAValueProfileFor(Node
* node
)
580 return MethodOfGettingAValueProfile();
582 CodeBlock
* profiledBlock
= baselineCodeBlockFor(node
->origin
.semantic
);
584 if (node
->op() == GetLocal
) {
585 return MethodOfGettingAValueProfile::fromLazyOperand(
587 LazyOperandValueProfileKey(
588 node
->origin
.semantic
.bytecodeIndex
, node
->local()));
591 return MethodOfGettingAValueProfile(valueProfileFor(node
));
594 bool usesArguments() const
596 return m_codeBlock
->usesArguments();
599 BlockIndex
numBlocks() const { return m_blocks
.size(); }
600 BasicBlock
* block(BlockIndex blockIndex
) const { return m_blocks
[blockIndex
].get(); }
601 BasicBlock
* lastBlock() const { return block(numBlocks() - 1); }
603 void appendBlock(PassRefPtr
<BasicBlock
> basicBlock
)
605 basicBlock
->index
= m_blocks
.size();
606 m_blocks
.append(basicBlock
);
609 void killBlock(BlockIndex blockIndex
)
611 m_blocks
[blockIndex
].clear();
614 void killBlock(BasicBlock
* basicBlock
)
616 killBlock(basicBlock
->index
);
619 void killBlockAndItsContents(BasicBlock
*);
621 void killUnreachableBlocks();
623 bool isPredictedNumerical(Node
* node
)
625 return isNumerical(node
->child1().useKind()) && isNumerical(node
->child2().useKind());
628 // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing.
629 // It really means that it will not clobber the entire world. It's still up to you to
630 // carefully consider things like:
631 // - PutByVal definitely changes the array it stores to, and may even change its length.
632 // - PutByOffset definitely changes the object it stores to.
634 bool byValIsPure(Node
* node
)
636 switch (node
->arrayMode().type()) {
641 case Array::Contiguous
:
642 case Array::ArrayStorage
:
643 return !node
->arrayMode().isOutOfBounds();
644 case Array::SlowPutArrayStorage
:
645 return !node
->arrayMode().mayStoreToHole();
647 return node
->op() == GetByVal
&& node
->arrayMode().isInBounds();
648 #if USE(JSVALUE32_64)
649 case Array::Arguments
:
650 if (node
->op() == GetByVal
)
653 #endif // USE(JSVALUE32_64)
659 bool clobbersWorld(Node
* node
)
661 if (node
->flags() & NodeClobbersWorld
)
663 if (!(node
->flags() & NodeMightClobber
))
665 switch (node
->op()) {
670 return !byValIsPure(node
);
672 switch (node
->child1().useKind()) {
673 case StringObjectUse
:
674 case StringOrStringObjectUse
:
680 RELEASE_ASSERT_NOT_REACHED();
684 RELEASE_ASSERT_NOT_REACHED();
685 return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
689 void determineReachability();
690 void resetReachability();
692 void resetExitStates();
694 unsigned varArgNumChildren(Node
* node
)
696 ASSERT(node
->flags() & NodeHasVarArgs
);
697 return node
->numChildren();
700 unsigned numChildren(Node
* node
)
702 if (node
->flags() & NodeHasVarArgs
)
703 return varArgNumChildren(node
);
704 return AdjacencyList::Size
;
707 Edge
& varArgChild(Node
* node
, unsigned index
)
709 ASSERT(node
->flags() & NodeHasVarArgs
);
710 return m_varArgChildren
[node
->firstChild() + index
];
713 Edge
& child(Node
* node
, unsigned index
)
715 if (node
->flags() & NodeHasVarArgs
)
716 return varArgChild(node
, index
);
717 return node
->children
.child(index
);
720 void voteNode(Node
* node
, unsigned ballot
, float weight
= 1)
722 switch (node
->op()) {
725 node
= node
->child1().node();
731 if (node
->op() == GetLocal
)
732 node
->variableAccessData()->vote(ballot
, weight
);
735 void voteNode(Edge edge
, unsigned ballot
, float weight
= 1)
737 voteNode(edge
.node(), ballot
, weight
);
740 void voteChildren(Node
* node
, unsigned ballot
, float weight
= 1)
742 if (node
->flags() & NodeHasVarArgs
) {
743 for (unsigned childIdx
= node
->firstChild();
744 childIdx
< node
->firstChild() + node
->numChildren();
746 if (!!m_varArgChildren
[childIdx
])
747 voteNode(m_varArgChildren
[childIdx
], ballot
, weight
);
754 voteNode(node
->child1(), ballot
, weight
);
757 voteNode(node
->child2(), ballot
, weight
);
760 voteNode(node
->child3(), ballot
, weight
);
763 template<typename T
> // T = Node* or Edge
764 void substitute(BasicBlock
& block
, unsigned startIndexInBlock
, T oldThing
, T newThing
)
766 for (unsigned indexInBlock
= startIndexInBlock
; indexInBlock
< block
.size(); ++indexInBlock
) {
767 Node
* node
= block
[indexInBlock
];
768 if (node
->flags() & NodeHasVarArgs
) {
769 for (unsigned childIdx
= node
->firstChild(); childIdx
< node
->firstChild() + node
->numChildren(); ++childIdx
) {
770 if (!!m_varArgChildren
[childIdx
])
771 compareAndSwap(m_varArgChildren
[childIdx
], oldThing
, newThing
);
777 compareAndSwap(node
->children
.child1(), oldThing
, newThing
);
780 compareAndSwap(node
->children
.child2(), oldThing
, newThing
);
783 compareAndSwap(node
->children
.child3(), oldThing
, newThing
);
787 // Use this if you introduce a new GetLocal and you know that you introduced it *before*
788 // any GetLocals in the basic block.
789 // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
790 // introduced anywhere in the basic block.
791 void substituteGetLocal(BasicBlock
& block
, unsigned startIndexInBlock
, VariableAccessData
* variableAccessData
, Node
* newGetLocal
);
793 void invalidateCFG();
795 void clearFlagsOnAllNodes(NodeFlags
);
797 void clearReplacements();
798 void initializeNodeOwners();
800 void getBlocksInDepthFirstOrder(Vector
<BasicBlock
*>& result
);
802 Profiler::Compilation
* compilation() { return m_plan
.compilation
.get(); }
804 DesiredIdentifiers
& identifiers() { return m_plan
.identifiers
; }
805 DesiredWatchpoints
& watchpoints() { return m_plan
.watchpoints
; }
806 DesiredStructureChains
& chains() { return m_plan
.chains
; }
808 FullBytecodeLiveness
& livenessFor(CodeBlock
*);
809 FullBytecodeLiveness
& livenessFor(InlineCallFrame
*);
810 bool isLiveInBytecode(VirtualRegister
, CodeOrigin
);
812 unsigned frameRegisterCount();
813 unsigned stackPointerOffset();
814 unsigned requiredRegisterCountForExit();
815 unsigned requiredRegisterCountForExecutionAndExit();
817 JSActivation
* tryGetActivation(Node
*);
818 WriteBarrierBase
<Unknown
>* tryGetRegisters(Node
*);
820 JSArrayBufferView
* tryGetFoldableView(Node
*);
821 JSArrayBufferView
* tryGetFoldableView(Node
*, ArrayMode
);
822 JSArrayBufferView
* tryGetFoldableViewForChild1(Node
*);
824 virtual void visitChildren(SlotVisitor
&) override
;
828 CodeBlock
* m_codeBlock
;
829 CodeBlock
* m_profiledBlock
;
831 NodeAllocator
& m_allocator
;
833 Operands
<AbstractValue
> m_mustHandleAbstractValues
;
835 Vector
< RefPtr
<BasicBlock
> , 8> m_blocks
;
836 Vector
<Edge
, 16> m_varArgChildren
;
837 Vector
<StorageAccessData
> m_storageAccessData
;
838 Vector
<Node
*, 8> m_arguments
;
839 SegmentedVector
<VariableAccessData
, 16> m_variableAccessData
;
840 SegmentedVector
<ArgumentPosition
, 8> m_argumentPositions
;
841 SegmentedVector
<StructureSet
, 16> m_structureSet
;
842 SegmentedVector
<StructureTransitionData
, 8> m_structureTransitionData
;
843 SegmentedVector
<NewArrayBufferData
, 4> m_newArrayBufferData
;
844 Bag
<BranchData
> m_branchData
;
845 Bag
<SwitchData
> m_switchData
;
846 Bag
<MultiGetByOffsetData
> m_multiGetByOffsetData
;
847 Bag
<MultiPutByOffsetData
> m_multiPutByOffsetData
;
848 Vector
<InlineVariableData
, 4> m_inlineVariableData
;
849 HashMap
<CodeBlock
*, std::unique_ptr
<FullBytecodeLiveness
>> m_bytecodeLiveness
;
851 HashSet
<ExecutableBase
*> m_executablesWhoseArgumentsEscaped
;
852 BitVector m_lazyVars
;
853 Dominators m_dominators
;
854 NaturalLoops m_naturalLoops
;
855 unsigned m_localVars
;
856 unsigned m_nextMachineLocal
;
857 unsigned m_parameterSlots
;
858 int m_machineCaptureStart
;
859 std::unique_ptr
<SlowArgument
[]> m_slowArguments
;
861 #if USE(JSVALUE32_64)
862 std::unordered_map
<int64_t, double*> m_doubleConstantsMap
;
863 std::unique_ptr
<Bag
<double>> m_doubleConstants
;
866 OptimizationFixpointState m_fixpointState
;
868 UnificationState m_unificationState
;
869 RefCountState m_refCountState
;
872 void handleSuccessor(Vector
<BasicBlock
*, 16>& worklist
, BasicBlock
*, BasicBlock
* successor
);
873 void addForDepthFirstSort(Vector
<BasicBlock
*>& result
, Vector
<BasicBlock
*, 16>& worklist
, HashSet
<BasicBlock
*>& seen
, BasicBlock
*);
875 AddSpeculationMode
addImmediateShouldSpeculateInt32(Node
* add
, bool variableShouldSpeculateInt32
, Node
* immediate
, RareCaseProfilingSource source
)
877 ASSERT(immediate
->hasConstant());
879 JSValue immediateValue
= immediate
->valueOfJSConstant(m_codeBlock
);
880 if (!immediateValue
.isNumber() && !immediateValue
.isBoolean())
881 return DontSpeculateInt32
;
883 if (!variableShouldSpeculateInt32
)
884 return DontSpeculateInt32
;
886 if (immediateValue
.isInt32() || immediateValue
.isBoolean())
887 return add
->canSpeculateInt32(source
) ? SpeculateInt32
: DontSpeculateInt32
;
889 double doubleImmediate
= immediateValue
.asDouble();
890 const double twoToThe48
= 281474976710656.0;
891 if (doubleImmediate
< -twoToThe48
|| doubleImmediate
> twoToThe48
)
892 return DontSpeculateInt32
;
894 return bytecodeCanTruncateInteger(add
->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants
: DontSpeculateInt32
;
898 #define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
899 Node* _node = (node); \
900 if (_node->flags() & NodeHasVarArgs) { \
901 for (unsigned _childIdx = _node->firstChild(); \
902 _childIdx < _node->firstChild() + _node->numChildren(); \
904 if (!!(graph).m_varArgChildren[_childIdx]) \
905 thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
908 if (!_node->child1()) { \
911 && !_node->child3()); \
914 thingToDo(_node, _node->child1()); \
916 if (!_node->child2()) { \
917 ASSERT(!_node->child3()); \
920 thingToDo(_node, _node->child2()); \
922 if (!_node->child3()) \
924 thingToDo(_node, _node->child3()); \
928 } } // namespace JSC::DFG