/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGGraph_h
#define DFGGraph_h

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
#include "DFGDominators.h"
#include "DFGFrozenValue.h"
#include "DFGLongLivedState.h"
#include "DFGNaturalLoops.h"
#include "DFGNode.h"
#include "DFGNodeAllocator.h"
#include "DFGPlan.h"
#include "DFGPrePostNumbering.h"
#include "DFGScannable.h"
#include "FullBytecodeLiveness.h"
#include "JSStack.h"
#include "MethodOfGettingAValueProfile.h"
#include <unordered_map>
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

namespace JSC {

class CodeBlock;
class ExecState;

namespace DFG {

#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do {            \
        Node* _node = (node);                                           \
        if (_node->flags() & NodeHasVarArgs) {                          \
            for (unsigned _childIdx = _node->firstChild();              \
                _childIdx < _node->firstChild() + _node->numChildren(); \
                _childIdx++) {                                          \
                if (!!(graph).m_varArgChildren[_childIdx])              \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
            }                                                           \
        } else {                                                        \
            if (!_node->child1()) {                                     \
                ASSERT(                                                 \
                    !_node->child2()                                    \
                    && !_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child1());                          \
                                                                        \
            if (!_node->child2()) {                                     \
                ASSERT(!_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child2());                          \
                                                                        \
            if (!_node->child3())                                       \
                break;                                                  \
            thingToDo(_node, _node->child3());                          \
        }                                                               \
    } while (false)
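
// A minimal usage sketch for DFG_NODE_DO_TO_CHILDREN (hypothetical caller-side
// code): the callback must be passed as a single preprocessor token, since a
// comma inside a lambda's parameter list would split the macro arguments, so
// bind the lambda to a variable first.
//
//     unsigned edgeCount = 0;
//     auto countEdge = [&] (Node*, Edge&) { edgeCount++; };
//     DFG_NODE_DO_TO_CHILDREN(graph, node, countEdge);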

#define DFG_ASSERT(graph, node, assertion) do {                         \
        if (!!(assertion))                                              \
            break;                                                      \
        (graph).handleAssertionFailure(                                 \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
    } while (false)

#define DFG_CRASH(graph, node, reason) do {                             \
        (graph).handleAssertionFailure(                                 \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \
    } while (false)
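
// Usage sketch for the assertion macros (hypothetical call sites): unlike a
// bare ASSERT, these dump the graph and the offending node before crashing,
// which makes IR invariant violations much easier to diagnose.
//
//     DFG_ASSERT(*this, node, node->op() == GetLocal);
//     DFG_CRASH(*this, node, "Unexpected graph shape");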

struct InlineVariableData {
    InlineCallFrame* inlineCallFrame;
    unsigned argumentPositionStart;
    VariableAccessData* calleeVariable;
};

enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};

//
// === Graph ===
//
// The order of nodes within a block may be significant for nodes with
// side-effects (property accesses, value conversions). Nodes that are 'dead'
// remain in the block's node vector with refCount 0.
class Graph : public virtual Scannable {
public:
    Graph(VM&, Plan&, LongLivedState&);
    ~Graph();

    void changeChild(Edge& edge, Node* newNode)
    {
        edge.setNode(newNode);
    }

    void changeEdge(Edge& edge, Edge newEdge)
    {
        edge = newEdge;
    }

    void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
    {
        if (edge.node() != oldNode)
            return;
        changeChild(edge, newNode);
    }

    void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
    {
        if (edge != oldEdge)
            return;
        changeEdge(edge, newEdge);
    }

    void performSubstitution(Node* node)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                performSubstitutionForEdge(m_varArgChildren[childIdx]);
        } else {
            performSubstitutionForEdge(node->child1());
            performSubstitutionForEdge(node->child2());
            performSubstitutionForEdge(node->child3());
        }
    }

    void performSubstitutionForEdge(Edge& child)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;

        // Check if there is any replacement.
        Node* replacement = child->replacement();
        if (!replacement)
            return;

        child.setNode(replacement);

        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(!child->replacement());
    }
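
    // A sketch of the replacement workflow served by the two methods above
    // (hypothetical phase code, assuming Node::setReplacement() as the setter
    // paired with Node::replacement()): mark the node being folded away, then
    // rewrite the edges of every user that follows it.
    //
    //     redundantNode->setReplacement(canonicalNode);
    //     for (Node* user : nodesAfterRedundantNode)
    //         performSubstitution(user);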

    template<typename... Params>
    Node* addNode(SpeculatedType type, Params... params)
    {
        Node* node = new (m_allocator) Node(params...);
        node->predict(type);
        return node;
    }
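
    // Usage sketch (hypothetical operands): the variadic parameters are
    // forwarded verbatim to a Node constructor, so any Node constructor
    // signature works here. A constant with an int32 prediction might look like:
    //
    //     Node* answer = addNode(
    //         SpecInt32, JSConstant, origin, OpInfo(freeze(jsNumber(42))));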

    void dethread();

    FrozenValue* freeze(JSValue); // We use weak freezing by default.
    FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue).

    void convertToConstant(Node* node, FrozenValue* value);
    void convertToConstant(Node* node, JSValue value);
    void convertToStrongConstant(Node* node, JSValue value);

    StructureRegistrationResult registerStructure(Structure* structure);
    void assertIsRegistered(Structure* structure);

    // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
    void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);

    bool terminalsAreValid();

    enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
    void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
    void dump(PrintStream&, Edge);
    void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
    static int amountOfNodeWhiteSpace(Node*);
    static void printNodeWhiteSpace(PrintStream&, Node*);

    // Dump the code origin of the given node as a diff from the code origin of the
    // preceding node. Returns true if anything was printed.
    bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext*);

    AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
    {
        ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);

        RareCaseProfilingSource source = add->sourceFor(pass);

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (left->hasConstant())
            return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source);
        if (right->hasConstant())
            return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source);

        return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
    }

    AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            pass);
    }

    AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
            add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
            pass);
    }

    AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
    {
        if (add->op() == ValueAdd)
            return valueAddSpeculationMode(add, pass);

        return arithAddSpeculationMode(add, pass);
    }

    bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(add, pass) != DontSpeculateInt32;
    }

    bool addShouldSpeculateMachineInt(Node* add)
    {
        if (!enableInt52())
            return false;

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        bool speculation = Node::shouldSpeculateMachineInt(left, right);
        return speculation && !hasExitSite(add, Int52Overflow);
    }

    bool mulShouldSpeculateInt32(Node* mul, PredictionPass pass)
    {
        ASSERT(mul->op() == ArithMul);

        Node* left = mul->child1().node();
        Node* right = mul->child2().node();

        return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
            && mul->canSpeculateInt32(mul->sourceFor(pass));
    }

    bool mulShouldSpeculateMachineInt(Node* mul, PredictionPass pass)
    {
        ASSERT(mul->op() == ArithMul);

        if (!enableInt52())
            return false;

        Node* left = mul->child1().node();
        Node* right = mul->child2().node();

        return Node::shouldSpeculateMachineInt(left, right)
            && mul->canSpeculateInt52(pass)
            && !hasExitSite(mul, Int52Overflow);
    }

    bool negateShouldSpeculateInt32(Node* negate, PredictionPass pass)
    {
        ASSERT(negate->op() == ArithNegate);
        return negate->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
            && negate->canSpeculateInt32(pass);
    }

    bool negateShouldSpeculateMachineInt(Node* negate, PredictionPass pass)
    {
        ASSERT(negate->op() == ArithNegate);
        if (!enableInt52())
            return false;
        return negate->child1()->shouldSpeculateMachineInt()
            && !hasExitSite(negate, Int52Overflow)
            && negate->canSpeculateInt52(pass);
    }

    bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass)
    {
        ASSERT(arithRound->op() == ArithRound);
        return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero);
    }

    static const char* opName(NodeType);

    StructureSet* addStructureSet(const StructureSet& structureSet)
    {
        ASSERT(structureSet.size());
        m_structureSet.append(structureSet);
        return &m_structureSet.last();
    }

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return m_codeBlock->globalObjectFor(codeOrigin);
    }

    JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
    {
        JSGlobalObject* object = globalObjectFor(codeOrigin);
        return jsCast<JSObject*>(object->methodTable()->toThis(object, object->globalExec(), NotStrictMode));
    }

    ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->ownerExecutable();

        return inlineCallFrame->executable.get();
    }

    ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
    {
        return executableFor(codeOrigin.inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock;
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
    }

    SymbolTable* symbolTableFor(InlineCallFrame* inlineCallFrame)
    {
        return baselineCodeBlockFor(inlineCallFrame)->symbolTable();
    }

    SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
    {
        return symbolTableFor(codeOrigin.inlineCallFrame);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return m_codeBlock->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
    }

    bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
    }

    bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
    }

    bool hasExitSite(Node* node, ExitKind exitKind)
    {
        return hasExitSite(node->origin.semantic, exitKind);
    }

    VirtualRegister activationRegister()
    {
        return m_profiledBlock->activationRegister();
    }

    VirtualRegister uncheckedActivationRegister()
    {
        return m_profiledBlock->uncheckedActivationRegister();
    }

    VirtualRegister machineActivationRegister()
    {
        return m_profiledBlock->activationRegister();
    }

    VirtualRegister uncheckedMachineActivationRegister()
    {
        return m_profiledBlock->uncheckedActivationRegister();
    }

    ValueProfile* valueProfileFor(Node*);
    MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node*);

    BlockIndex numBlocks() const { return m_blocks.size(); }
    BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
    BasicBlock* lastBlock() const { return block(numBlocks() - 1); }

    void appendBlock(PassRefPtr<BasicBlock> basicBlock)
    {
        basicBlock->index = m_blocks.size();
        m_blocks.append(basicBlock);
    }

    void killBlock(BlockIndex blockIndex)
    {
        m_blocks[blockIndex] = nullptr;
    }

    void killBlock(BasicBlock* basicBlock)
    {
        killBlock(basicBlock->index);
    }

    void killBlockAndItsContents(BasicBlock*);

    void killUnreachableBlocks();

    void determineReachability();
    void resetReachability();

    void computeRefCounts();

    unsigned varArgNumChildren(Node* node)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return node->numChildren();
    }

    unsigned numChildren(Node* node)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgNumChildren(node);
        return AdjacencyList::Size;
    }

    Edge& varArgChild(Node* node, unsigned index)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return m_varArgChildren[node->firstChild() + index];
    }

    Edge& child(Node* node, unsigned index)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgChild(node, index);
        return node->children.child(index);
    }
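
    // A uniform child walk that works for both fixed-arity nodes and var-args
    // nodes (a sketch; fixed-arity slots may hold empty Edges, so test each
    // edge before using it):
    //
    //     for (unsigned i = 0; i < numChildren(node); ++i) {
    //         Edge edge = child(node, i);
    //         if (edge)
    //             doSomethingWith(edge);
    //     }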

    void voteNode(Node* node, unsigned ballot, float weight = 1)
    {
        switch (node->op()) {
        case ValueToInt32:
        case UInt32ToNumber:
            node = node->child1().node();
            break;
        default:
            break;
        }

        if (node->op() == GetLocal)
            node->variableAccessData()->vote(ballot, weight);
    }

    void voteNode(Edge edge, unsigned ballot, float weight = 1)
    {
        voteNode(edge.node(), ballot, weight);
    }

    void voteChildren(Node* node, unsigned ballot, float weight = 1)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild();
                childIdx < node->firstChild() + node->numChildren();
                childIdx++) {
                if (!!m_varArgChildren[childIdx])
                    voteNode(m_varArgChildren[childIdx], ballot, weight);
            }
            return;
        }

        if (!node->child1())
            return;
        voteNode(node->child1(), ballot, weight);
        if (!node->child2())
            return;
        voteNode(node->child2(), ballot, weight);
        if (!node->child3())
            return;
        voteNode(node->child3(), ballot, weight);
    }

    template<typename T> // T = Node* or Edge
    void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
    {
        for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
            Node* node = block[indexInBlock];
            if (node->flags() & NodeHasVarArgs) {
                for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
                    if (!!m_varArgChildren[childIdx])
                        compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
                }
                continue;
            }
            if (!node->child1())
                continue;
            compareAndSwap(node->children.child1(), oldThing, newThing);
            if (!node->child2())
                continue;
            compareAndSwap(node->children.child2(), oldThing, newThing);
            if (!node->child3())
                continue;
            compareAndSwap(node->children.child3(), oldThing, newThing);
        }
    }
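
    // Substitution sketch (hypothetical call): rewrite all uses of one thing
    // with another from startIndexInBlock onward. Passing Edges replaces the
    // whole edge, including its use kind; passing Node*s keeps each matching
    // edge's existing use kind.
    //
    //     substitute(*block, 0, oldNode, newNode);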

    // Use this if you introduce a new GetLocal and you know that you introduced it *before*
    // any GetLocals in the basic block.
    // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
    // introduced anywhere in the basic block.
    void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);

    void invalidateCFG();

    void clearFlagsOnAllNodes(NodeFlags);

    void clearReplacements();
    void clearEpochs();
    void initializeNodeOwners();

    BlockList blocksInPreOrder();
    BlockList blocksInPostOrder();

    class NaturalBlockIterable {
    public:
        NaturalBlockIterable()
            : m_graph(nullptr)
        {
        }

        NaturalBlockIterable(Graph& graph)
            : m_graph(&graph)
        {
        }

        class iterator {
        public:
            iterator()
                : m_graph(nullptr)
                , m_index(0)
            {
            }

            iterator(Graph& graph, BlockIndex index)
                : m_graph(&graph)
                , m_index(findNext(index))
            {
            }

            BasicBlock* operator*()
            {
                return m_graph->block(m_index);
            }

            iterator& operator++()
            {
                m_index = findNext(m_index + 1);
                return *this;
            }

            bool operator==(const iterator& other) const
            {
                return m_index == other.m_index;
            }

            bool operator!=(const iterator& other) const
            {
                return !(*this == other);
            }

        private:
            BlockIndex findNext(BlockIndex index)
            {
                while (index < m_graph->numBlocks() && !m_graph->block(index))
                    index++;
                return index;
            }

            Graph* m_graph;
            BlockIndex m_index;
        };

        iterator begin()
        {
            return iterator(*m_graph, 0);
        }

        iterator end()
        {
            return iterator(*m_graph, m_graph->numBlocks());
        }

    private:
        Graph* m_graph;
    };

    NaturalBlockIterable blocksInNaturalOrder()
    {
        return NaturalBlockIterable(*this);
    }
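
    // Because the iterator above skips killed (null) block slots, a range-based
    // for loop visits only live blocks, in index order. A minimal sketch:
    //
    //     for (BasicBlock* block : blocksInNaturalOrder())
    //         dataLog("Block #", block->index, " has ", block->size(), " nodes.\n");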

    template<typename ChildFunctor>
    void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
    {
        DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
    }

    template<typename ChildFunctor>
    void doToChildren(Node* node, const ChildFunctor& functor)
    {
        doToChildrenWithNode(
            node,
            [&functor] (Node*, Edge& edge) {
                functor(edge);
            });
    }

    bool uses(Node* node, Node* child)
    {
        bool result = false;
        doToChildren(node, [&] (Edge edge) { result |= edge == child; });
        return result;
    }
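
    // uses() above doubles as a usage example for doToChildren(): the functor
    // receives each non-empty child edge. Another sketch, counting them:
    //
    //     unsigned numLiveChildren = 0;
    //     doToChildren(node, [&] (Edge) { numLiveChildren++; });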

    Profiler::Compilation* compilation() { return m_plan.compilation.get(); }

    DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }

    FullBytecodeLiveness& livenessFor(CodeBlock*);
    FullBytecodeLiveness& livenessFor(InlineCallFrame*);

    // Quickly query if a single local is live at the given point. This is faster than calling
    // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
    // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
    bool isLiveInBytecode(VirtualRegister, CodeOrigin);

    // Quickly get all of the non-argument locals live at the given point. This doesn't give you
    // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
    // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
    template<typename Functor>
    void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        // Support for not redundantly reporting arguments. Necessary because in case of a varargs
        // call, only the callee knows that arguments are live while in the case of a non-varargs
        // call, both callee and caller will see the variables live.
        VirtualRegister exclusionStart;
        VirtualRegister exclusionEnd;

        for (;;) {
            InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
            VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);

            if (inlineCallFrame) {
                if (inlineCallFrame->isClosureCall)
                    functor(stackOffset + JSStack::Callee);
                if (inlineCallFrame->isVarargs())
                    functor(stackOffset + JSStack::ArgumentCount);
            }

            CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
            FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
            const FastBitVector& liveness = fullLiveness.getLiveness(codeOrigin.bytecodeIndex);
            for (unsigned relativeLocal = codeBlock->m_numCalleeRegisters; relativeLocal--;) {
                VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);

                // Don't report if our callee already reported.
                if (reg >= exclusionStart && reg < exclusionEnd)
                    continue;

                if (liveness.get(relativeLocal))
                    functor(reg);
            }

            if (!inlineCallFrame)
                break;

            // Arguments are always live. This would be redundant if it wasn't for our
            // op_call_varargs inlining. See the comment above.
            exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0);
            exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->arguments.size());

            // We will always have a "this" argument and exclusionStart should be a smaller stack
            // offset than exclusionEnd.
            ASSERT(exclusionStart < exclusionEnd);

            for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
                functor(reg);

            codeOrigin = inlineCallFrame->caller;
        }
    }
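
    // Liveness-walk sketch (hypothetical caller-side code): collect the live
    // non-argument locals at a node's semantic origin. Use forAllLiveInBytecode()
    // below to include the always-live arguments as well.
    //
    //     Vector<VirtualRegister> liveLocals;
    //     forAllLocalsLiveInBytecode(
    //         node->origin.semantic,
    //         [&] (VirtualRegister reg) { liveLocals.append(reg); });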

    // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
    // you want to compare two sets of live locals from two different CodeOrigins.
    BitVector localsLiveInBytecode(CodeOrigin);

    // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
    // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
    template<typename Functor>
    void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        forAllLocalsLiveInBytecode(codeOrigin, functor);

        // Report all arguments as being live.
        for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
            functor(virtualRegisterForArgument(argument));
    }

    BytecodeKills& killsFor(CodeBlock*);
    BytecodeKills& killsFor(InlineCallFrame*);

    unsigned frameRegisterCount();
    unsigned stackPointerOffset();
    unsigned requiredRegisterCountForExit();
    unsigned requiredRegisterCountForExecutionAndExit();

    JSValue tryGetConstantProperty(JSValue base, const StructureSet&, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
    JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);

    JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset);
    JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset);
    JSValue tryGetConstantClosureVar(Node*, ScopeOffset);

    JSArrayBufferView* tryGetFoldableView(JSValue);
    JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode);

    void registerFrozenValues();

    virtual void visitChildren(SlotVisitor&) override;

    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        std::nullptr_t, const char* file, int line, const char* function,
        const char* assertion);
    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        Node*, const char* file, int line, const char* function,
        const char* assertion);
    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        BasicBlock*, const char* file, int line, const char* function,
        const char* assertion);

    bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; }

    VM& m_vm;
    Plan& m_plan;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;

    NodeAllocator& m_allocator;

    Vector<RefPtr<BasicBlock>, 8> m_blocks;
    Vector<Edge, 16> m_varArgChildren;

    HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
    Bag<FrozenValue> m_frozenValues;

    Vector<uint32_t> m_uint32ValuesInUse;

    Bag<StorageAccessData> m_storageAccessData;

    // In CPS, this is all of the SetArgument nodes for the arguments in the machine code block
    // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
    // nodes.
    //
    // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that
    // may have some speculation in the prologue and survived DCE. Note that to get the speculation
    // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate
    // even if the argument got killed. For example:
    //
    //     function foo(x) {
    //         var tmp = x + 1;
    //     }
    //
    // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
    // have a proven check for the edge to "x". So, we will not insert a Check node and we will
    // kill the GetStack for "x". But, we must do the int check in the prologue, because that's the
    // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
    //
    //     var o = {
    //         valueOf: function() { do side effects; }
    //     };
    //     foo(o);
    //
    // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
    // effects.
    Vector<Node*, 8> m_arguments;

    // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in.
    Vector<FlushFormat> m_argumentFormats;

    SegmentedVector<VariableAccessData, 16> m_variableAccessData;
    SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
    SegmentedVector<StructureSet, 16> m_structureSet;
    Bag<Transition> m_transitions;
    SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
    Bag<BranchData> m_branchData;
    Bag<SwitchData> m_switchData;
    Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
    Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
    Bag<ObjectMaterializationData> m_objectMaterializationData;
    Bag<CallVarargsData> m_callVarargsData;
    Bag<LoadVarargsData> m_loadVarargsData;
    Bag<StackAccessData> m_stackAccessData;
    Vector<InlineVariableData, 4> m_inlineVariableData;
    HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
    HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>> m_bytecodeKills;
    Dominators m_dominators;
    PrePostNumbering m_prePostNumbering;
    NaturalLoops m_naturalLoops;
    unsigned m_localVars;
    unsigned m_nextMachineLocal;
    unsigned m_parameterSlots;

#if USE(JSVALUE32_64)
    std::unordered_map<int64_t, double*> m_doubleConstantsMap;
    std::unique_ptr<Bag<double>> m_doubleConstants;
#endif

    OptimizationFixpointState m_fixpointState;
    StructureRegistrationState m_structureRegistrationState;
    GraphForm m_form;
    UnificationState m_unificationState;
    PlanStage m_planStage { PlanStage::Initial };
    RefCountState m_refCountState;
    bool m_hasDebuggerEnabled;
private:

    void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);

    AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node* immediate, RareCaseProfilingSource source)
    {
        ASSERT(immediate->hasConstant());

        JSValue immediateValue = immediate->asJSValue();
        if (!immediateValue.isNumber() && !immediateValue.isBoolean())
            return DontSpeculateInt32;

        if (!variableShouldSpeculateInt32)
            return DontSpeculateInt32;

        // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
        // In that case, we stay conservative unless the other operand was explicitly typed as integer.
        NodeFlags operandResultType = operand->result();
        if (operandResultType != NodeResultInt32 && immediateValue.isDouble())
            return DontSpeculateInt32;

        if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32())
            return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;

        double doubleImmediate = immediateValue.asDouble();
        const double twoToThe48 = 281474976710656.0;
        if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
            return DontSpeculateInt32;

        return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
    }
};

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)
#endif // DFGGraph_h