/*
 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGGraph_h
#define DFGGraph_h

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
#include "DFGDominators.h"
#include "DFGLongLivedState.h"
#include "DFGNaturalLoops.h"
#include "DFGNode.h"
#include "DFGNodeAllocator.h"
#include "DFGPlan.h"
#include "DFGScannable.h"
#include "JSStack.h"
#include "MethodOfGettingAValueProfile.h"
#include <unordered_map>
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

namespace JSC {

class CodeBlock;
class ExecState;

namespace DFG {

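// Describes a resolved property access: the property's storage offset and the
// index of the property name in the identifier table.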
struct StorageAccessData {
    PropertyOffset offset;
    unsigned identifierNumber;
};

struct InlineVariableData {
    InlineCallFrame* inlineCallFrame;
    unsigned argumentPositionStart;
    VariableAccessData* calleeVariable;
};

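// Result of the addSpeculationMode() family below: whether an add-like node
// should speculate that its operands are int32, and whether constant operands
// may be truncated to int32 to make that speculation work.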
enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};

//
// === Graph ===
//
// The order of nodes may be significant for nodes with side-effects (property
// accesses, value conversions). Nodes that are 'dead' remain in the graph with
// refCount 0.
class Graph : public virtual Scannable {
public:
    Graph(VM&, Plan&, LongLivedState&);
    ~Graph();

    void changeChild(Edge& edge, Node* newNode)
    {
        edge.setNode(newNode);
    }

    void changeEdge(Edge& edge, Edge newEdge)
    {
        edge = newEdge;
    }

    void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
    {
        if (edge.node() != oldNode)
            return;
        changeChild(edge, newNode);
    }

    void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
    {
        if (edge != oldEdge)
            return;
        changeEdge(edge, newEdge);
    }

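    // Rewires a node's edges through any pending replacements. Phases that fold
    // or substitute nodes record the new node in the old node's misc.replacement
    // field; calling this on each node afterwards makes its children point at the
    // replacements directly.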
    void performSubstitution(Node* node)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                performSubstitutionForEdge(m_varArgChildren[childIdx]);
        } else {
            performSubstitutionForEdge(node->child1());
            performSubstitutionForEdge(node->child2());
            performSubstitutionForEdge(node->child3());
        }
    }

    void performSubstitutionForEdge(Edge& child)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;

        // Check if there is any replacement.
        Node* replacement = child->misc.replacement;
        if (!replacement)
            return;

        child.setNode(replacement);

        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(!child->misc.replacement);
    }

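    // Allocates a new node from the graph's allocator and seeds its result
    // prediction. The remaining arguments are forwarded to the Node constructor.
    // A sketch of a typical call (the exact Node constructor overloads live in
    // DFGNode.h, and the variable names here are illustrative):
    //     Node* sum = addNode(SpecInt32, ArithAdd, origin, Edge(a), Edge(b));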
    template<typename... Params>
    Node* addNode(SpeculatedType type, Params... params)
    {
        Node* node = new (m_allocator) Node(params...);
        node->predict(type);
        return node;
    }

    void dethread();

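    // Converting a GetLocal into a constant invalidates the data flow threading
    // of local variables (the GetLocal/SetLocal/Phi links), so the graph is
    // dethreaded first. Other node types must not carry VariableAccessData.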
    void convertToConstant(Node* node, unsigned constantNumber)
    {
        if (node->op() == GetLocal)
            dethread();
        else
            ASSERT(!node->hasVariableAccessData(*this));
        node->convertToConstant(constantNumber);
    }

    unsigned constantRegisterForConstant(JSValue value)
    {
        unsigned constantRegister;
        if (!m_codeBlock->findConstant(value, constantRegister)) {
            constantRegister = m_codeBlock->addConstantLazily();
            initializeLazyWriteBarrierForConstant(
                m_plan.writeBarriers,
                m_codeBlock->constants()[constantRegister],
                m_codeBlock,
                constantRegister,
                m_codeBlock->ownerExecutable(),
                value);
        }
        return constantRegister;
    }

    void convertToConstant(Node* node, JSValue value)
    {
        if (value.isObject())
            node->convertToWeakConstant(value.asCell());
        else
            convertToConstant(node, constantRegisterForConstant(value));
    }

    // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
    void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
    enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
    void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
    void dump(PrintStream&, Edge);
    void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
    static int amountOfNodeWhiteSpace(Node*);
    static void printNodeWhiteSpace(PrintStream&, Node*);

    // Dump the code origin of the given node as a diff from the code origin of the
    // preceding node. Returns true if anything was printed.
    bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext*);

    SpeculatedType getJSConstantSpeculation(Node* node)
    {
        return speculationFromValue(node->valueOfJSConstant(m_codeBlock));
    }

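    // Decides whether an add-like node (ValueAdd, ArithAdd, ArithSub) should
    // speculate int32. If one operand is a constant, the decision hinges on the
    // other operand and on whether the constant could be truncated to int32
    // without changing the bytecode-visible result; otherwise both operands must
    // want int32 and the node must not have a history of overflowing.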
    AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
    {
        ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);

        RareCaseProfilingSource source = add->sourceFor(pass);

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (left->hasConstant())
            return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, left, source);
        if (right->hasConstant())
            return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, right, source);

        return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
    }

    AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            pass);
    }

    AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
            add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
            pass);
    }

    AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
    {
        if (add->op() == ValueAdd)
            return valueAddSpeculationMode(add, pass);

        return arithAddSpeculationMode(add, pass);
    }

    bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(add, pass) != DontSpeculateInt32;
    }

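    // MachineInt (Int52) speculation is only available on targets where
    // enableInt52() is true, and only if this add has not already been observed
    // to overflow the 52-bit range.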
    bool addShouldSpeculateMachineInt(Node* add)
    {
        if (!enableInt52())
            return false;

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        bool speculation = Node::shouldSpeculateMachineInt(left, right);

        return speculation && !hasExitSite(add, Int52Overflow);
    }

    bool mulShouldSpeculateInt32(Node* mul, PredictionPass pass)
    {
        ASSERT(mul->op() == ArithMul);

        Node* left = mul->child1().node();
        Node* right = mul->child2().node();

        return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
            && mul->canSpeculateInt32(mul->sourceFor(pass));
    }

    bool mulShouldSpeculateMachineInt(Node* mul, PredictionPass pass)
    {
        ASSERT(mul->op() == ArithMul);

        if (!enableInt52())
            return false;

        Node* left = mul->child1().node();
        Node* right = mul->child2().node();

        return Node::shouldSpeculateMachineInt(left, right)
            && mul->canSpeculateInt52(pass)
            && !hasExitSite(mul, Int52Overflow);
    }

    bool negateShouldSpeculateInt32(Node* negate, PredictionPass pass)
    {
        ASSERT(negate->op() == ArithNegate);
        return negate->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
            && negate->canSpeculateInt32(pass);
    }

    bool negateShouldSpeculateMachineInt(Node* negate, PredictionPass pass)
    {
        ASSERT(negate->op() == ArithNegate);
        if (!enableInt52())
            return false;
        return negate->child1()->shouldSpeculateMachineInt()
            && !hasExitSite(negate, Int52Overflow)
            && negate->canSpeculateInt52(pass);
    }

    VirtualRegister bytecodeRegisterForArgument(CodeOrigin codeOrigin, int argument)
    {
        return VirtualRegister(
            codeOrigin.inlineCallFrame->stackOffset +
            baselineCodeBlockFor(codeOrigin)->argumentIndexAfterCapture(argument));
    }

    // Helper methods to check nodes for constants.
    bool isConstant(Node* node)
    {
        return node->hasConstant();
    }
    bool isJSConstant(Node* node)
    {
        return node->hasConstant();
    }
    bool isInt32Constant(Node* node)
    {
        return node->isInt32Constant(m_codeBlock);
    }
    bool isDoubleConstant(Node* node)
    {
        return node->isDoubleConstant(m_codeBlock);
    }
    bool isNumberConstant(Node* node)
    {
        return node->isNumberConstant(m_codeBlock);
    }
    bool isMachineIntConstant(Node* node)
    {
        return node->isMachineIntConstant(m_codeBlock);
    }
    bool isBooleanConstant(Node* node)
    {
        return node->isBooleanConstant(m_codeBlock);
    }
    bool isCellConstant(Node* node)
    {
        if (!isJSConstant(node))
            return false;
        JSValue value = valueOfJSConstant(node);
        return value.isCell() && !!value;
    }
    bool isFunctionConstant(Node* node)
    {
        if (!isJSConstant(node))
            return false;
        if (!getJSFunction(valueOfJSConstant(node)))
            return false;
        return true;
    }
    bool isInternalFunctionConstant(Node* node)
    {
        if (!isJSConstant(node))
            return false;
        JSValue value = valueOfJSConstant(node);
        if (!value.isCell() || !value)
            return false;
        JSCell* cell = value.asCell();
        if (!cell->inherits(InternalFunction::info()))
            return false;
        return true;
    }
    // Helper methods to get constant values from nodes.
    JSValue valueOfJSConstant(Node* node)
    {
        return node->valueOfJSConstant(m_codeBlock);
    }
    int32_t valueOfInt32Constant(Node* node)
    {
        JSValue value = valueOfJSConstant(node);
        if (!value.isInt32()) {
            dataLog("Value isn't int32: ", value, "\n");
            dump();
            RELEASE_ASSERT_NOT_REACHED();
        }
        return value.asInt32();
    }
    double valueOfNumberConstant(Node* node)
    {
        return valueOfJSConstant(node).asNumber();
    }
    bool valueOfBooleanConstant(Node* node)
    {
        return valueOfJSConstant(node).asBoolean();
    }
    JSFunction* valueOfFunctionConstant(Node* node)
    {
        JSCell* function = getJSFunction(valueOfJSConstant(node));
        ASSERT(function);
        return jsCast<JSFunction*>(function);
    }

    static const char* opName(NodeType);

    StructureSet* addStructureSet(const StructureSet& structureSet)
    {
        ASSERT(structureSet.size());
        m_structureSet.append(structureSet);
        return &m_structureSet.last();
    }

    StructureTransitionData* addStructureTransitionData(const StructureTransitionData& structureTransitionData)
    {
        m_structureTransitionData.append(structureTransitionData);
        return &m_structureTransitionData.last();
    }

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return m_codeBlock->globalObjectFor(codeOrigin);
    }

    JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
    {
        JSGlobalObject* object = globalObjectFor(codeOrigin);
        return jsCast<JSObject*>(object->methodTable()->toThis(object, object->globalExec(), NotStrictMode));
    }

    ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->ownerExecutable();

        return inlineCallFrame->executable.get();
    }

    ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
    {
        return executableFor(codeOrigin.inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock;
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return m_codeBlock->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return m_plan.watchpoints.isStillValid(
            globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint());
    }

    bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
    }

    bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
    }

    bool hasExitSite(Node* node, ExitKind exitKind)
    {
        return hasExitSite(node->origin.semantic, exitKind);
    }

    bool usesArguments(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->usesArguments();

        return baselineCodeBlockForInlineCallFrame(inlineCallFrame)->usesArguments();
    }

    VirtualRegister argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->argumentsRegister();

        return VirtualRegister(baselineCodeBlockForInlineCallFrame(
            inlineCallFrame)->argumentsRegister().offset() +
            inlineCallFrame->stackOffset);
    }

    VirtualRegister argumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return argumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister machineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->argumentsRegister();

        return inlineCallFrame->argumentsRegister;
    }

    VirtualRegister machineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return machineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister uncheckedArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->uncheckedArgumentsRegister();

        CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
        if (!codeBlock->usesArguments())
            return VirtualRegister();

        return VirtualRegister(codeBlock->argumentsRegister().offset() +
            inlineCallFrame->stackOffset);
    }

    VirtualRegister uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return uncheckedArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister activationRegister()
    {
        return m_profiledBlock->activationRegister();
    }

    VirtualRegister uncheckedActivationRegister()
    {
        return m_profiledBlock->uncheckedActivationRegister();
    }

    VirtualRegister machineActivationRegister()
    {
        return m_profiledBlock->activationRegister();
    }

    VirtualRegister uncheckedMachineActivationRegister()
    {
        return m_profiledBlock->uncheckedActivationRegister();
    }

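    // Maps a DFG node back to the baseline CodeBlock's value profile that fed
    // its prediction, if there is one: the argument profile for GetArgument and
    // for locals that correspond to arguments, or the bytecode-offset profile
    // for nodes with a heap prediction. Returns 0 when no profile applies.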
    ValueProfile* valueProfileFor(Node* node)
    {
        if (!node)
            return 0;

        CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);

        if (node->op() == GetArgument)
            return profiledBlock->valueProfileForArgument(node->local().toArgument());

        if (node->hasLocal(*this)) {
            if (m_form == SSA)
                return 0;
            if (!node->local().isArgument())
                return 0;
            int argument = node->local().toArgument();
            if (node->variableAccessData() != m_arguments[argument]->variableAccessData())
                return 0;
            return profiledBlock->valueProfileForArgument(argument);
        }

        if (node->hasHeapPrediction())
            return profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);

        return 0;
    }

    MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* node)
    {
        if (!node)
            return MethodOfGettingAValueProfile();

        CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);

        if (node->op() == GetLocal) {
            return MethodOfGettingAValueProfile::fromLazyOperand(
                profiledBlock,
                LazyOperandValueProfileKey(
                    node->origin.semantic.bytecodeIndex, node->local()));
        }

        return MethodOfGettingAValueProfile(valueProfileFor(node));
    }

    bool usesArguments() const
    {
        return m_codeBlock->usesArguments();
    }

    BlockIndex numBlocks() const { return m_blocks.size(); }
    BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
    BasicBlock* lastBlock() const { return block(numBlocks() - 1); }

    void appendBlock(PassRefPtr<BasicBlock> basicBlock)
    {
        basicBlock->index = m_blocks.size();
        m_blocks.append(basicBlock);
    }

    void killBlock(BlockIndex blockIndex)
    {
        m_blocks[blockIndex].clear();
    }

    void killBlock(BasicBlock* basicBlock)
    {
        killBlock(basicBlock->index);
    }

    void killBlockAndItsContents(BasicBlock*);

    void killUnreachableBlocks();

    bool isPredictedNumerical(Node* node)
    {
        return isNumerical(node->child1().useKind()) && isNumerical(node->child2().useKind());
    }

    // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing.
    // It really means that it will not clobber the entire world. It's still up to you to
    // carefully consider things like:
    // - PutByVal definitely changes the array it stores to, and may even change its length.
    // - PutByOffset definitely changes the object it stores to.
    // - and so on.
    bool byValIsPure(Node* node)
    {
        switch (node->arrayMode().type()) {
        case Array::Generic:
            return false;
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage:
            return !node->arrayMode().isOutOfBounds();
        case Array::SlowPutArrayStorage:
            return !node->arrayMode().mayStoreToHole();
        case Array::String:
            return node->op() == GetByVal && node->arrayMode().isInBounds();
#if USE(JSVALUE32_64)
        case Array::Arguments:
            if (node->op() == GetByVal)
                return true;
            return false;
#endif // USE(JSVALUE32_64)
        default:
            return true;
        }
    }

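    // Conservative effect query used by passes like CSE: returns true if the node
    // may read or write arbitrary parts of the heap, based on its NodeClobbersWorld
    // and NodeMightClobber flags plus the per-node checks below.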
    bool clobbersWorld(Node* node)
    {
        if (node->flags() & NodeClobbersWorld)
            return true;
        if (!(node->flags() & NodeMightClobber))
            return false;
        switch (node->op()) {
        case GetByVal:
        case PutByValDirect:
        case PutByVal:
        case PutByValAlias:
            return !byValIsPure(node);
        case ToString:
            switch (node->child1().useKind()) {
            case StringObjectUse:
            case StringOrStringObjectUse:
                return false;
            case CellUse:
            case UntypedUse:
                return true;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                return true;
            }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
        }
    }

    void determineReachability();
    void resetReachability();

    void resetExitStates();

    unsigned varArgNumChildren(Node* node)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return node->numChildren();
    }

    unsigned numChildren(Node* node)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgNumChildren(node);
        return AdjacencyList::Size;
    }

    Edge& varArgChild(Node* node, unsigned index)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return m_varArgChildren[node->firstChild() + index];
    }

    Edge& child(Node* node, unsigned index)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgChild(node, index);
        return node->children.child(index);
    }

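    // Voting support for variable prediction: a 'ballot' identifies the question
    // being voted on (for example, whether a variable is better represented as a
    // double), and votes are accumulated on the VariableAccessData of the GetLocal
    // that ultimately feeds the node. ValueToInt32/UInt32ToNumber are looked
    // through so the vote lands on the underlying local.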
    void voteNode(Node* node, unsigned ballot, float weight = 1)
    {
        switch (node->op()) {
        case ValueToInt32:
        case UInt32ToNumber:
            node = node->child1().node();
            break;
        default:
            break;
        }

        if (node->op() == GetLocal)
            node->variableAccessData()->vote(ballot, weight);
    }

    void voteNode(Edge edge, unsigned ballot, float weight = 1)
    {
        voteNode(edge.node(), ballot, weight);
    }

    void voteChildren(Node* node, unsigned ballot, float weight = 1)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild();
                childIdx < node->firstChild() + node->numChildren();
                childIdx++) {
                if (!!m_varArgChildren[childIdx])
                    voteNode(m_varArgChildren[childIdx], ballot, weight);
            }
            return;
        }

        if (!node->child1())
            return;
        voteNode(node->child1(), ballot, weight);
        if (!node->child2())
            return;
        voteNode(node->child2(), ballot, weight);
        if (!node->child3())
            return;
        voteNode(node->child3(), ballot, weight);
    }

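    // Replaces every use of oldThing with newThing in the given block, starting
    // at startIndexInBlock. A sketch of how a phase might call this (the variable
    // names are illustrative, not part of this header):
    //     graph.substitute(*block, 0, oldNode, newNode);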
    template<typename T> // T = Node* or Edge
    void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
    {
        for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
            Node* node = block[indexInBlock];
            if (node->flags() & NodeHasVarArgs) {
                for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
                    if (!!m_varArgChildren[childIdx])
                        compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
                }
                continue;
            }
            if (!node->child1())
                continue;
            compareAndSwap(node->children.child1(), oldThing, newThing);
            if (!node->child2())
                continue;
            compareAndSwap(node->children.child2(), oldThing, newThing);
            if (!node->child3())
                continue;
            compareAndSwap(node->children.child3(), oldThing, newThing);
        }
    }

    // Use this if you introduce a new GetLocal and you know that you introduced it *before*
    // any GetLocals in the basic block.
    // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
    // introduced anywhere in the basic block.
    void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);

    void invalidateCFG();

    void clearFlagsOnAllNodes(NodeFlags);

    void clearReplacements();
    void initializeNodeOwners();

    void getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result);

    Profiler::Compilation* compilation() { return m_plan.compilation.get(); }

    DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
    DesiredStructureChains& chains() { return m_plan.chains; }

    FullBytecodeLiveness& livenessFor(CodeBlock*);
    FullBytecodeLiveness& livenessFor(InlineCallFrame*);
    bool isLiveInBytecode(VirtualRegister, CodeOrigin);

    unsigned frameRegisterCount();
    unsigned stackPointerOffset();
    unsigned requiredRegisterCountForExit();
    unsigned requiredRegisterCountForExecutionAndExit();

    JSActivation* tryGetActivation(Node*);
    WriteBarrierBase<Unknown>* tryGetRegisters(Node*);

    JSArrayBufferView* tryGetFoldableView(Node*);
    JSArrayBufferView* tryGetFoldableView(Node*, ArrayMode);
    JSArrayBufferView* tryGetFoldableViewForChild1(Node*);

    virtual void visitChildren(SlotVisitor&) override;

    VM& m_vm;
    Plan& m_plan;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;

    NodeAllocator& m_allocator;

    Operands<AbstractValue> m_mustHandleAbstractValues;

    Vector<RefPtr<BasicBlock>, 8> m_blocks;
    Vector<Edge, 16> m_varArgChildren;
    Vector<StorageAccessData> m_storageAccessData;
    Vector<Node*, 8> m_arguments;
    SegmentedVector<VariableAccessData, 16> m_variableAccessData;
    SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
    SegmentedVector<StructureSet, 16> m_structureSet;
    SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
    SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
    Bag<BranchData> m_branchData;
    Bag<SwitchData> m_switchData;
    Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
    Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
    Vector<InlineVariableData, 4> m_inlineVariableData;
    HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
    bool m_hasArguments;
    HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
    BitVector m_lazyVars;
    Dominators m_dominators;
    NaturalLoops m_naturalLoops;
    unsigned m_localVars;
    unsigned m_nextMachineLocal;
    unsigned m_parameterSlots;
    int m_machineCaptureStart;
    std::unique_ptr<SlowArgument[]> m_slowArguments;

#if USE(JSVALUE32_64)
    std::unordered_map<int64_t, double*> m_doubleConstantsMap;
    std::unique_ptr<Bag<double>> m_doubleConstants;
#endif

    OptimizationFixpointState m_fixpointState;
    GraphForm m_form;
    UnificationState m_unificationState;
    RefCountState m_refCountState;
private:

    void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);
    void addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock*);

    AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate, RareCaseProfilingSource source)
    {
        ASSERT(immediate->hasConstant());

        JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
        if (!immediateValue.isNumber() && !immediateValue.isBoolean())
            return DontSpeculateInt32;

        if (!variableShouldSpeculateInt32)
            return DontSpeculateInt32;

        if (immediateValue.isInt32() || immediateValue.isBoolean())
            return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;

        double doubleImmediate = immediateValue.asDouble();
        const double twoToThe48 = 281474976710656.0;
        if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
            return DontSpeculateInt32;

        return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
    }
};

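// Iterates the live children (edges) of a node, whether it uses fixed or
// var-arg children. 'thingToDo' is any macro or function callable as
// thingToDo(node, edge). A hypothetical use (the callback name is illustrative):
//     DFG_NODE_DO_TO_CHILDREN(graph, node, countEdge);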
#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
        Node* _node = (node); \
        if (_node->flags() & NodeHasVarArgs) { \
            for (unsigned _childIdx = _node->firstChild(); \
                _childIdx < _node->firstChild() + _node->numChildren(); \
                _childIdx++) { \
                if (!!(graph).m_varArgChildren[_childIdx]) \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
            } \
        } else { \
            if (!_node->child1()) { \
                ASSERT( \
                    !_node->child2() \
                    && !_node->child3()); \
                break; \
            } \
            thingToDo(_node, _node->child1()); \
            \
            if (!_node->child2()) { \
                ASSERT(!_node->child3()); \
                break; \
            } \
            thingToDo(_node, _node->child2()); \
            \
            if (!_node->child3()) \
                break; \
            thingToDo(_node, _node->child3()); \
        } \
    } while (false)

} } // namespace JSC::DFG

#endif
#endif