/*
 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef DFGGraph_h
#define DFGGraph_h

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
#include "DFGDominators.h"
#include "DFGLongLivedState.h"
#include "DFGNaturalLoops.h"
#include "DFGNode.h"
#include "DFGNodeAllocator.h"
#include "DFGPlan.h"
#include "DFGScannable.h"
#include "MethodOfGettingAValueProfile.h"
#include <unordered_map>
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
#include <wtf/StdLibExtras.h>

namespace JSC { namespace DFG {

struct StorageAccessData {
    PropertyOffset offset;
    unsigned identifierNumber;
};

struct InlineVariableData {
    InlineCallFrame* inlineCallFrame;
    unsigned argumentPositionStart;
    VariableAccessData* calleeVariable;
};

enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};
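
// Note on the three modes (a summary of how the helpers below use them, not part of
// the original header): DontSpeculateInt32 means the add must be compiled without an
// int32 speculation; SpeculateInt32 means both operands may be speculated int32;
// SpeculateInt32AndTruncateConstants additionally allows a non-int32 constant operand
// to be truncated to int32, which addImmediateShouldSpeculateInt32() only permits
// when the bytecode itself can truncate.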

// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
class Graph : public virtual Scannable {
public:
    Graph(VM&, Plan&, LongLivedState&);
    ~Graph();

    void changeChild(Edge& edge, Node* newNode)
    {
        edge.setNode(newNode);
    }

    void changeEdge(Edge& edge, Edge newEdge)
    {
        edge = newEdge;
    }

    void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
    {
        if (edge.node() != oldNode)
            return;
        changeChild(edge, newNode);
    }

    void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
    {
        if (edge != oldEdge)
            return;
        changeEdge(edge, newEdge);
    }

    void performSubstitution(Node* node)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                performSubstitutionForEdge(m_varArgChildren[childIdx]);
        } else {
            performSubstitutionForEdge(node->child1());
            performSubstitutionForEdge(node->child2());
            performSubstitutionForEdge(node->child3());
        }
    }

    void performSubstitutionForEdge(Edge& child)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;
        
        // Check if there is any replacement.
        Node* replacement = child->misc.replacement;
        if (!replacement)
            return;
        
        child.setNode(replacement);
        
        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(!child->misc.replacement);
    }
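
    // Illustrative sketch (not part of the original header): a pass that decides
    // 'oldNode' is redundant with some existing 'canonical' node would typically
    // record the replacement and then substitute while walking the rest of the block:
    //
    //     oldNode->misc.replacement = canonical;
    //     // ... then, for each subsequent node in the block:
    //     performSubstitution(node);
    //
    // so that every edge still pointing at 'oldNode' gets rerouted to 'canonical'.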

    template<typename... Params>
    Node* addNode(SpeculatedType type, Params... params)
    {
        Node* node = new (m_allocator) Node(params...);
        node->predict(type);
        return node;
    }
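
    // Example usage (a sketch; the exact Node constructor arguments depend on the
    // opcode and are assumptions here, not defined in this header):
    //
    //     Node* sum = graph.addNode(SpecInt32, ArithAdd, origin, Edge(left), Edge(right));
    //
    // The variadic arguments are forwarded to the Node constructor, and the node's
    // prediction is then seeded with the given SpeculatedType.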

    void dethread();
    
    void convertToConstant(Node* node, unsigned constantNumber)
    {
        if (node->op() == GetLocal)
            dethread();
        else
            ASSERT(!node->hasVariableAccessData(*this));
        node->convertToConstant(constantNumber);
    }

    unsigned constantRegisterForConstant(JSValue value)
    {
        unsigned constantRegister;
        if (!m_codeBlock->findConstant(value, constantRegister)) {
            constantRegister = m_codeBlock->addConstantLazily();
            initializeLazyWriteBarrierForConstant(
                m_plan.writeBarriers,
                m_codeBlock->constants()[constantRegister],
                m_codeBlock,
                constantRegister,
                m_codeBlock->ownerExecutable(),
                value);
        }
        return constantRegister;
    }

    void convertToConstant(Node* node, JSValue value)
    {
        if (value.isObject())
            node->convertToWeakConstant(value.asCell());
        else
            convertToConstant(node, constantRegisterForConstant(value));
    }

    // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
    void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
    enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
    void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
    void dump(PrintStream&, Edge);
    void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
    static int amountOfNodeWhiteSpace(Node*);
    static void printNodeWhiteSpace(PrintStream&, Node*);
    
    // Dump the code origin of the given node as a diff from the code origin of the
    // preceding node. Returns true if anything was printed.
    bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext*);

    SpeculatedType getJSConstantSpeculation(Node* node)
    {
        return speculationFromValue(node->valueOfJSConstant(m_codeBlock));
    }

    AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
    {
        ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);
        
        RareCaseProfilingSource source = add->sourceFor(pass);
        
        Node* left = add->child1().node();
        Node* right = add->child2().node();
        
        if (left->hasConstant())
            return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, left, source);
        if (right->hasConstant())
            return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, right, source);
        
        return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
    }

    AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            pass);
    }

    AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
            add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
            pass);
    }

    AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
    {
        if (add->op() == ValueAdd)
            return valueAddSpeculationMode(add, pass);
        
        return arithAddSpeculationMode(add, pass);
    }

    bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(add, pass) != DontSpeculateInt32;
    }

    bool addShouldSpeculateMachineInt(Node* add)
    {
        if (!enableInt52())
            return false;
        
        Node* left = add->child1().node();
        Node* right = add->child2().node();
        
        bool speculation;
        if (add->op() == ValueAdd)
            speculation = Node::shouldSpeculateMachineInt(left, right);
        else
            speculation = Node::shouldSpeculateMachineInt(left, right);
        
        return speculation && !hasExitSite(add, Int52Overflow);
    }

    bool mulShouldSpeculateInt32(Node* mul, PredictionPass pass)
    {
        ASSERT(mul->op() == ArithMul);
        
        Node* left = mul->child1().node();
        Node* right = mul->child2().node();
        
        return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
            && mul->canSpeculateInt32(mul->sourceFor(pass));
    }

    bool mulShouldSpeculateMachineInt(Node* mul, PredictionPass pass)
    {
        ASSERT(mul->op() == ArithMul);
        
        if (!enableInt52())
            return false;
        
        Node* left = mul->child1().node();
        Node* right = mul->child2().node();
        
        return Node::shouldSpeculateMachineInt(left, right)
            && mul->canSpeculateInt52(pass)
            && !hasExitSite(mul, Int52Overflow);
    }

    bool negateShouldSpeculateInt32(Node* negate, PredictionPass pass)
    {
        ASSERT(negate->op() == ArithNegate);
        return negate->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
            && negate->canSpeculateInt32(pass);
    }

    bool negateShouldSpeculateMachineInt(Node* negate, PredictionPass pass)
    {
        ASSERT(negate->op() == ArithNegate);
        if (!enableInt52())
            return false;
        return negate->child1()->shouldSpeculateMachineInt()
            && !hasExitSite(negate, Int52Overflow)
            && negate->canSpeculateInt52(pass);
    }

    VirtualRegister bytecodeRegisterForArgument(CodeOrigin codeOrigin, int argument)
    {
        return VirtualRegister(
            codeOrigin.inlineCallFrame->stackOffset +
            baselineCodeBlockFor(codeOrigin)->argumentIndexAfterCapture(argument));
    }

    // Helper methods to check nodes for constants.
    bool isConstant(Node* node)
    {
        return node->hasConstant();
    }
    
    bool isJSConstant(Node* node)
    {
        return node->hasConstant();
    }
    
    bool isInt32Constant(Node* node)
    {
        return node->isInt32Constant(m_codeBlock);
    }
    
    bool isDoubleConstant(Node* node)
    {
        return node->isDoubleConstant(m_codeBlock);
    }
    
    bool isNumberConstant(Node* node)
    {
        return node->isNumberConstant(m_codeBlock);
    }
    
    bool isMachineIntConstant(Node* node)
    {
        return node->isMachineIntConstant(m_codeBlock);
    }
    
    bool isBooleanConstant(Node* node)
    {
        return node->isBooleanConstant(m_codeBlock);
    }

    bool isCellConstant(Node* node)
    {
        if (!isJSConstant(node))
            return false;
        JSValue value = valueOfJSConstant(node);
        return value.isCell() && !!value;
    }

    bool isFunctionConstant(Node* node)
    {
        if (!isJSConstant(node))
            return false;
        if (!getJSFunction(valueOfJSConstant(node)))
            return false;
        return true;
    }

    bool isInternalFunctionConstant(Node* node)
    {
        if (!isJSConstant(node))
            return false;
        JSValue value = valueOfJSConstant(node);
        if (!value.isCell() || !value)
            return false;
        JSCell* cell = value.asCell();
        if (!cell->inherits(InternalFunction::info()))
            return false;
        return true;
    }

    // Helper methods to get constant values from nodes.
    JSValue valueOfJSConstant(Node* node)
    {
        return node->valueOfJSConstant(m_codeBlock);
    }

    int32_t valueOfInt32Constant(Node* node)
    {
        JSValue value = valueOfJSConstant(node);
        if (!value.isInt32()) {
            dataLog("Value isn't int32: ", value, "\n");
            dump();
            RELEASE_ASSERT_NOT_REACHED();
        }
        return value.asInt32();
    }

    double valueOfNumberConstant(Node* node)
    {
        return valueOfJSConstant(node).asNumber();
    }
    
    bool valueOfBooleanConstant(Node* node)
    {
        return valueOfJSConstant(node).asBoolean();
    }

    JSFunction* valueOfFunctionConstant(Node* node)
    {
        JSCell* function = getJSFunction(valueOfJSConstant(node));
        ASSERT(function);
        return jsCast<JSFunction*>(function);
    }
    
    static const char *opName(NodeType);

    StructureSet* addStructureSet(const StructureSet& structureSet)
    {
        ASSERT(structureSet.size());
        m_structureSet.append(structureSet);
        return &m_structureSet.last();
    }

    StructureTransitionData* addStructureTransitionData(const StructureTransitionData& structureTransitionData)
    {
        m_structureTransitionData.append(structureTransitionData);
        return &m_structureTransitionData.last();
    }

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return m_codeBlock->globalObjectFor(codeOrigin);
    }

    JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
    {
        JSGlobalObject* object = globalObjectFor(codeOrigin);
        return jsCast<JSObject*>(object->methodTable()->toThis(object, object->globalExec(), NotStrictMode));
    }

    ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->ownerExecutable();
        
        return inlineCallFrame->executable.get();
    }
    
    ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
    {
        return executableFor(codeOrigin.inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock;
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }
    
    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return m_codeBlock->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }
    
    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return m_plan.watchpoints.isStillValid(
            globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint());
    }

    bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
    }
    
    bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
    }
    
    bool hasExitSite(Node* node, ExitKind exitKind)
    {
        return hasExitSite(node->origin.semantic, exitKind);
    }

    bool usesArguments(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->usesArguments();
        
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame)->usesArguments();
    }

    VirtualRegister argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->argumentsRegister();
        
        return VirtualRegister(baselineCodeBlockForInlineCallFrame(
            inlineCallFrame)->argumentsRegister().offset() +
            inlineCallFrame->stackOffset);
    }
    
    VirtualRegister argumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return argumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister machineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->argumentsRegister();
        
        return inlineCallFrame->argumentsRegister;
    }
    
    VirtualRegister machineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return machineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister uncheckedArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->uncheckedArgumentsRegister();
        
        CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
        if (!codeBlock->usesArguments())
            return VirtualRegister();
        
        return VirtualRegister(codeBlock->argumentsRegister().offset() +
            inlineCallFrame->stackOffset);
    }
    
    VirtualRegister uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return uncheckedArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister activationRegister()
    {
        return m_profiledBlock->activationRegister();
    }
    
    VirtualRegister uncheckedActivationRegister()
    {
        return m_profiledBlock->uncheckedActivationRegister();
    }
    
    VirtualRegister machineActivationRegister()
    {
        return m_profiledBlock->activationRegister();
    }
    
    VirtualRegister uncheckedMachineActivationRegister()
    {
        return m_profiledBlock->uncheckedActivationRegister();
    }

    ValueProfile* valueProfileFor(Node* node)
    {
        if (!node)
            return 0;
        
        CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
        
        if (node->op() == GetArgument)
            return profiledBlock->valueProfileForArgument(node->local().toArgument());
        
        if (node->hasLocal(*this)) {
            if (!node->local().isArgument())
                return 0;
            int argument = node->local().toArgument();
            if (node->variableAccessData() != m_arguments[argument]->variableAccessData())
                return 0;
            return profiledBlock->valueProfileForArgument(argument);
        }
        
        if (node->hasHeapPrediction())
            return profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
        
        return 0;
    }

    MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* node)
    {
        if (!node)
            return MethodOfGettingAValueProfile();
        
        CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
        
        if (node->op() == GetLocal) {
            return MethodOfGettingAValueProfile::fromLazyOperand(
                profiledBlock,
                LazyOperandValueProfileKey(
                    node->origin.semantic.bytecodeIndex, node->local()));
        }
        
        return MethodOfGettingAValueProfile(valueProfileFor(node));
    }

    bool usesArguments() const
    {
        return m_codeBlock->usesArguments();
    }

    BlockIndex numBlocks() const { return m_blocks.size(); }
    BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
    BasicBlock* lastBlock() const { return block(numBlocks() - 1); }

    void appendBlock(PassRefPtr<BasicBlock> basicBlock)
    {
        basicBlock->index = m_blocks.size();
        m_blocks.append(basicBlock);
    }

    void killBlock(BlockIndex blockIndex)
    {
        m_blocks[blockIndex].clear();
    }
    
    void killBlock(BasicBlock* basicBlock)
    {
        killBlock(basicBlock->index);
    }
    
    void killBlockAndItsContents(BasicBlock*);
    
    void killUnreachableBlocks();

    bool isPredictedNumerical(Node* node)
    {
        return isNumerical(node->child1().useKind()) && isNumerical(node->child2().useKind());
    }

    // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing.
    // It really means that it will not clobber the entire world. It's still up to you to
    // carefully consider things like:
    // - PutByVal definitely changes the array it stores to, and may even change its length.
    // - PutByOffset definitely changes the object it stores to.
    bool byValIsPure(Node* node)
    {
        switch (node->arrayMode().type()) {
        case Array::Generic:
            return false;
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage:
            return !node->arrayMode().isOutOfBounds();
        case Array::SlowPutArrayStorage:
            return !node->arrayMode().mayStoreToHole();
        case Array::String:
            return node->op() == GetByVal && node->arrayMode().isInBounds();
#if USE(JSVALUE32_64)
        case Array::Arguments:
            if (node->op() == GetByVal)
                return true;
            return false;
#endif // USE(JSVALUE32_64)
        default:
            return true;
        }
    }
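
    // For example (illustration only): a CSE-style pass can use this to decide that a
    // second, identical in-bounds GetByVal on an Array::Contiguous array may reuse the
    // first load's result, whereas any Array::Generic access must be treated as impure.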

    bool clobbersWorld(Node* node)
    {
        if (node->flags() & NodeClobbersWorld)
            return true;
        if (!(node->flags() & NodeMightClobber))
            return false;
        switch (node->op()) {
        case GetByVal:
        case PutByVal:
        case PutByValAlias:
            return !byValIsPure(node);
        case ToString:
            switch (node->child1().useKind()) {
            case StringObjectUse:
            case StringOrStringObjectUse:
                return false;
            case CellUse:
            case UntypedUse:
                return true;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                return true;
            }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
        }
    }

    void determineReachability();
    void resetReachability();
    
    void resetExitStates();

    unsigned varArgNumChildren(Node* node)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return node->numChildren();
    }
    
    unsigned numChildren(Node* node)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgNumChildren(node);
        return AdjacencyList::Size;
    }

    Edge& varArgChild(Node* node, unsigned index)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return m_varArgChildren[node->firstChild() + index];
    }
    
    Edge& child(Node* node, unsigned index)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgChild(node, index);
        return node->children.child(index);
    }

    void voteNode(Node* node, unsigned ballot, float weight = 1)
    {
        switch (node->op()) {
        case ValueToInt32:
        case UInt32ToNumber:
            node = node->child1().node();
            break;
        default:
            break;
        }
        
        if (node->op() == GetLocal)
            node->variableAccessData()->vote(ballot, weight);
    }
    
    void voteNode(Edge edge, unsigned ballot, float weight = 1)
    {
        voteNode(edge.node(), ballot, weight);
    }

    void voteChildren(Node* node, unsigned ballot, float weight = 1)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild();
                childIdx < node->firstChild() + node->numChildren();
                childIdx++) {
                if (!!m_varArgChildren[childIdx])
                    voteNode(m_varArgChildren[childIdx], ballot, weight);
            }
            return;
        }
        
        if (!node->child1())
            return;
        voteNode(node->child1(), ballot, weight);
        if (!node->child2())
            return;
        voteNode(node->child2(), ballot, weight);
        if (!node->child3())
            return;
        voteNode(node->child3(), ballot, weight);
    }

    template<typename T> // T = Node* or Edge
    void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
    {
        for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
            Node* node = block[indexInBlock];
            if (node->flags() & NodeHasVarArgs) {
                for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
                    if (!!m_varArgChildren[childIdx])
                        compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
                }
                continue;
            }
            if (!node->child1())
                continue;
            compareAndSwap(node->children.child1(), oldThing, newThing);
            if (!node->child2())
                continue;
            compareAndSwap(node->children.child2(), oldThing, newThing);
            if (!node->child3())
                continue;
            compareAndSwap(node->children.child3(), oldThing, newThing);
        }
    }

    // Use this if you introduce a new GetLocal and you know that you introduced it *before*
    // any GetLocals in the basic block.
    // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
    // introduced anywhere in the basic block.
    void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);

    void invalidateCFG();
    
    void clearFlagsOnAllNodes(NodeFlags);
    
    void clearReplacements();
    void initializeNodeOwners();
    
    void getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result);

    Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
    
    DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
    DesiredStructureChains& chains() { return m_plan.chains; }

    FullBytecodeLiveness& livenessFor(CodeBlock*);
    FullBytecodeLiveness& livenessFor(InlineCallFrame*);
    bool isLiveInBytecode(VirtualRegister, CodeOrigin);
    
    unsigned frameRegisterCount();
    unsigned stackPointerOffset();
    unsigned requiredRegisterCountForExit();
    unsigned requiredRegisterCountForExecutionAndExit();

    JSActivation* tryGetActivation(Node*);
    WriteBarrierBase<Unknown>* tryGetRegisters(Node*);
    
    JSArrayBufferView* tryGetFoldableView(Node*);
    JSArrayBufferView* tryGetFoldableView(Node*, ArrayMode);
    JSArrayBufferView* tryGetFoldableViewForChild1(Node*);
    
    virtual void visitChildren(SlotVisitor&) override;

    VM& m_vm;
    Plan& m_plan;
    
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    
    NodeAllocator& m_allocator;
    
    Operands<AbstractValue> m_mustHandleAbstractValues;
    
    Vector<RefPtr<BasicBlock>, 8> m_blocks;
    Vector<Edge, 16> m_varArgChildren;
    Vector<StorageAccessData> m_storageAccessData;
    Vector<Node*, 8> m_arguments;
    SegmentedVector<VariableAccessData, 16> m_variableAccessData;
    SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
    SegmentedVector<StructureSet, 16> m_structureSet;
    SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
    SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
    Bag<BranchData> m_branchData;
    Bag<SwitchData> m_switchData;
    Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
    Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
    Vector<InlineVariableData, 4> m_inlineVariableData;
    HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
    
    HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
    BitVector m_lazyVars;
    Dominators m_dominators;
    NaturalLoops m_naturalLoops;
    unsigned m_localVars;
    unsigned m_nextMachineLocal;
    unsigned m_parameterSlots;
    int m_machineCaptureStart;
    std::unique_ptr<SlowArgument[]> m_slowArguments;
    
#if USE(JSVALUE32_64)
    std::unordered_map<int64_t, double*> m_doubleConstantsMap;
    std::unique_ptr<Bag<double>> m_doubleConstants;
#endif
    
    OptimizationFixpointState m_fixpointState;
    UnificationState m_unificationState;
    RefCountState m_refCountState;

private:
    
    void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);
    void addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock*);

    AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate, RareCaseProfilingSource source)
    {
        ASSERT(immediate->hasConstant());
        
        JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
        if (!immediateValue.isNumber() && !immediateValue.isBoolean())
            return DontSpeculateInt32;
        
        if (!variableShouldSpeculateInt32)
            return DontSpeculateInt32;
        
        if (immediateValue.isInt32() || immediateValue.isBoolean())
            return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;
        
        double doubleImmediate = immediateValue.asDouble();
        const double twoToThe48 = 281474976710656.0;
        if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
            return DontSpeculateInt32;
        
        return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
    }
};

#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do {            \
        Node* _node = (node);                                           \
        if (_node->flags() & NodeHasVarArgs) {                          \
            for (unsigned _childIdx = _node->firstChild();              \
                _childIdx < _node->firstChild() + _node->numChildren(); \
                _childIdx++) {                                          \
                if (!!(graph).m_varArgChildren[_childIdx])              \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
            }                                                           \
        } else {                                                        \
            if (!_node->child1()) {                                     \
                ASSERT(                                                 \
                    !_node->child2()                                    \
                    && !_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child1());                          \
                                                                        \
            if (!_node->child2()) {                                     \
                ASSERT(!_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child2());                          \
                                                                        \
            if (!_node->child3())                                       \
                break;                                                  \
            thingToDo(_node, _node->child3());                          \
        }                                                               \
    } while (false)
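
// Example usage (a sketch; 'countEdges' is a hypothetical helper, not part of this
// header). Because the macro expands 'thingToDo(node, edge)' textually, any
// function-like name taking (Node*, Edge) works:
//
//     static void countEdges(Node* node, Edge edge) { /* inspect the edge... */ }
//     ...
//     DFG_NODE_DO_TO_CHILDREN(graph, someNode, countEdges);
//
// The 'break' statements rely on the enclosing do { ... } while (false) to stop at the
// first null child, which is valid because children are packed at the front of the
// adjacency list (as the ASSERTs above check).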

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)

#endif // DFGGraph_h