]> git.saurik.com Git - apple/javascriptcore.git/blob - dfg/DFGObjectAllocationSinkingPhase.cpp
JavaScriptCore-7601.1.46.3.tar.gz
[apple/javascriptcore.git] / dfg / DFGObjectAllocationSinkingPhase.cpp
1 /*
2 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGAbstractHeap.h"
32 #include "DFGBlockMapInlines.h"
33 #include "DFGClobberize.h"
34 #include "DFGCombinedLiveness.h"
35 #include "DFGGraph.h"
36 #include "DFGInsertOSRHintsForUpdate.h"
37 #include "DFGInsertionSet.h"
38 #include "DFGLivenessAnalysisPhase.h"
39 #include "DFGOSRAvailabilityAnalysisPhase.h"
40 #include "DFGPhase.h"
41 #include "DFGPromoteHeapAccess.h"
42 #include "DFGSSACalculator.h"
43 #include "DFGValidate.h"
44 #include "JSCInlines.h"
45
46 namespace JSC { namespace DFG {
47
// File-local debug-logging switch. Const so that release builds can fold away
// every `if (verbose)` dataLog() block guarded by it; all uses in this file
// only read the flag.
static const bool verbose = false;
49
50 class ObjectAllocationSinkingPhase : public Phase {
51 public:
    // Constructs the phase over the given graph. The name string is the label the
    // base Phase uses for logging/validation output; the SSA calculator and
    // insertion set are both scoped to this graph and reused across iterations.
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation sinking")
        , m_ssaCalculator(graph)
        , m_insertionSet(graph)
    {
    }
58
59 bool run()
60 {
61 ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
62
63 m_graph.m_dominators.computeIfNecessary(m_graph);
64
65 // Logically we wish to consider every NewObject and sink it. However it's probably not
66 // profitable to sink a NewObject that will always escape. So, first we do a very simple
67 // forward flow analysis that determines the set of NewObject nodes that have any chance
68 // of benefiting from object allocation sinking. Then we fixpoint the following rules:
69 //
70 // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
71 // we insert MaterializeNewObject just before those escaping sites that come before any
72 // other escaping sites - that is, there is no path between the allocation and those sites
73 // that would see any other escape. Note that Upsilons constitute escaping sites. Then we
74 // insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
75 // materializations and the original PhantomNewObject. We then turn each PutByOffset over a
76 // PhantomNewObject into a PutHint.
77 //
78 // - We perform the same optimization for MaterializeNewObject. This allows us to cover
79 // cases where we had MaterializeNewObject flowing into a PutHint.
80 //
81 // We could also add this rule:
82 //
83 // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
84 // else, then replace the Phi with the MaterializeNewObject.
85 //
86 // FIXME: Implement this. Note that this totally doable but it requires some gnarly
87 // code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
88 // is considered to be an escape even by the pruner, so it's unlikely that we'll see
89 // many cases of Phi over Materializations.
90 // https://bugs.webkit.org/show_bug.cgi?id=136927
91
92 if (!performSinking())
93 return false;
94
95 while (performSinking()) { }
96
97 if (verbose) {
98 dataLog("Graph after sinking:\n");
99 m_graph.dump();
100 }
101
102 return true;
103 }
104
105 private:
106 bool performSinking()
107 {
108 m_graph.computeRefCounts();
109 performLivenessAnalysis(m_graph);
110 performOSRAvailabilityAnalysis(m_graph);
111 m_combinedLiveness = CombinedLiveness(m_graph);
112
113 CString graphBeforeSinking;
114 if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
115 StringPrintStream out;
116 m_graph.dump(out);
117 graphBeforeSinking = out.toCString();
118 }
119
120 if (verbose) {
121 dataLog("Graph before sinking:\n");
122 m_graph.dump();
123 }
124
125 determineMaterializationPoints();
126 if (m_sinkCandidates.isEmpty())
127 return false;
128
129 // At this point we are committed to sinking the sinking candidates.
130 placeMaterializationPoints();
131 lowerNonReadingOperationsOnPhantomAllocations();
132 promoteSunkenFields();
133
134 if (Options::validateGraphAtEachPhase())
135 validate(m_graph, DumpGraph, graphBeforeSinking);
136
137 if (verbose)
138 dataLog("Sinking iteration changed the graph.\n");
139 return true;
140 }
141
    // Forward dataflow pass that decides which allocations to sink
    // (m_sinkCandidates) and where to create materializations for them
    // (via createMaterialize).
    void determineMaterializationPoints()
    {
        // The premise of this pass is that if there exists a point in the program where some
        // path from a phantom allocation site to that point causes materialization, then *all*
        // paths cause materialization. This should mean that there are never any redundant
        // materializations.

        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();

        // For each block, maps a tracked allocation to whether it has been
        // materialized (true) by the head/tail of that block. Absence from the
        // map means the allocation is not tracked there at all.
        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);

        // Fixpoint: propagate materialized-ness forward through the CFG until
        // nothing changes.
        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of materialization point placement.\n");
            changed = false;
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                HashMap<Node*, bool> materialized = materializedAtHead[block];
                if (verbose)
                    dataLog(" Looking at block ", pointerDump(block), "\n");
                for (Node* node : *block) {
                    handleNode(
                        node,
                        [&] () {
                            // Sink candidate: starts out unmaterialized at its
                            // allocation site.
                            materialized.add(node, false);
                        },
                        [&] (Node* escapee) {
                            auto iter = materialized.find(escapee);
                            if (iter != materialized.end()) {
                                if (verbose)
                                    dataLog(" ", escapee, " escapes at ", node, "\n");
                                // An escape forces the allocation to be
                                // materialized from here on.
                                iter->value = true;
                            }
                        });
                }

                if (verbose)
                    dataLog(" Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");

                if (materialized == materializedAtTail[block])
                    continue;

                materializedAtTail[block] = materialized;
                changed = true;

                // Only propagate things to our successors if they are alive in all successors.
                // So, we prune materialized-at-tail to only include things that are live.
                Vector<Node*> toRemove;
                for (auto pair : materialized) {
                    if (!m_combinedLiveness.liveAtTail[block].contains(pair.key))
                        toRemove.append(pair.key);
                }
                for (Node* key : toRemove)
                    materialized.remove(key);

                for (BasicBlock* successorBlock : block->successors()) {
                    for (auto pair : materialized) {
                        // Merge into the successor's head state: materialized
                        // there if materialized along any incoming edge.
                        materializedAtHead[successorBlock].add(
                            pair.key, false).iterator->value |= pair.value;
                    }
                }
            }
        } while (changed);

        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
        // believes is sinkable, and one of the following is true:
        //
        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
        //    loops in which we don't need to have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node is not materialized and the
        //    node is dead.
        //
        // 3) The sum of execution counts of the materializations is less than the sum of
        //    execution counts of the original node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto pair : materializedAtTail[block]) {
                if (pair.value)
                    continue; // It was materialized.

                if (m_combinedLiveness.liveAtTail[block].contains(pair.key))
                    continue; // It might still get materialized in all of the successors.

                // We know that it died in this block and it wasn't materialized. That means that
                // if we sink this allocation, then *this* will be a path along which we never
                // have to allocate. Profit!
                m_sinkCandidates.add(pair.key);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return;

        // A materialization edge exists at any point where a node escapes but hasn't been
        // materialized yet. We do this in two parts. First we find all of the nodes that cause
        // escaping to happen, where the escapee had not yet been materialized. This catches
        // everything but loops. We then catch loops - as well as weirder control flow constructs -
        // in a subsequent pass that looks at places in the CFG where an edge exists from a block
        // that hasn't materialized to a block that has. We insert the materialization along such an
        // edge, and we rely on the fact that critical edges were already broken so that we actually
        // either insert the materialization at the head of the successor or the tail of the
        // predecessor.
        //
        // FIXME: This can create duplicate allocations when we really only needed to perform one.
        // For example:
        //
        //     var o = new Object();
        //     if (rare) {
        //         if (stuff)
        //             call(o); // o escapes here.
        //         return;
        //     }
        //     // o doesn't escape down here.
        //
        // In this example, we would place a materialization point at call(o) and then we would find
        // ourselves having to insert another one at the implicit else case of that if statement
        // ('cause we've broken critical edges). We would instead really like to just have one
        // materialization point right at the top of the then case of "if (rare)". To do this, we can
        // find the LCA of the various materializations in the dom tree.
        // https://bugs.webkit.org/show_bug.cgi?id=137124

        // First pass: find intra-block materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // Locally tracks which candidates are already materialized as we walk
            // the block, seeded from the fixpoint's head state.
            HashSet<Node*> materialized;
            for (auto pair : materializedAtHead[block]) {
                if (pair.value && m_sinkCandidates.contains(pair.key))
                    materialized.add(pair.key);
            }

            if (verbose)
                dataLog("Placing materialization points in ", pointerDump(block), " with materialization set ", listDump(materialized), "\n");

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                handleNode(
                    node,
                    [&] () { },
                    [&] (Node* escapee) {
                        if (verbose)
                            dataLog("Seeing ", escapee, " escape at ", node, "\n");

                        if (!m_sinkCandidates.contains(escapee)) {
                            if (verbose)
                                dataLog(" It's not a sink candidate.\n");
                            return;
                        }

                        if (!materialized.add(escapee).isNewEntry) {
                            if (verbose)
                                dataLog(" It's already materialized.\n");
                            return;
                        }

                        // First escape of a not-yet-materialized candidate in
                        // this block: materialize just before the escaping node.
                        createMaterialize(escapee, node);
                    });
            }
        }

        // Second pass: find CFG edge materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (BasicBlock* successorBlock : block->successors()) {
                for (auto pair : materializedAtHead[successorBlock]) {
                    Node* allocation = pair.key;

                    // We only care if it is materialized in the successor.
                    if (!pair.value)
                        continue;

                    // We only care about sinking candidates.
                    if (!m_sinkCandidates.contains(allocation))
                        continue;

                    // We only care if it isn't materialized in the predecessor.
                    if (materializedAtTail[block].get(allocation))
                        continue;

                    // We need to decide if we put the materialization at the head of the successor,
                    // or the tail of the predecessor. It needs to be placed so that the allocation
                    // is never materialized before, and always materialized after.

                    // Is it never materialized in any of successor's predecessors? I like to think
                    // of "successors' predecessors" and "predecessor's successors" as the "shadow",
                    // because of what it looks like when you draw it.
                    bool neverMaterializedInShadow = true;
                    for (BasicBlock* shadowBlock : successorBlock->predecessors) {
                        if (materializedAtTail[shadowBlock].get(allocation)) {
                            neverMaterializedInShadow = false;
                            break;
                        }
                    }

                    if (neverMaterializedInShadow) {
                        createMaterialize(allocation, successorBlock->firstOriginNode());
                        continue;
                    }

                    // Because we had broken critical edges, it must be the case that the
                    // predecessor's successors all materialize the object. This is because the
                    // previous case is guaranteed to catch the case where the successor only has
                    // one predecessor. When critical edges are broken, this is also the only case
                    // where the predecessor could have had multiple successors. Therefore we have
                    // already handled the case where the predecessor has multiple successors.
                    DFG_ASSERT(m_graph, block, block->numSuccessors() == 1);

                    createMaterialize(allocation, block->terminal());
                }
            }
        }
    }
362
    // Inserts the (still-empty) materialization nodes created by
    // determineMaterializationPoints() into the instruction stream, builds SSA
    // (Phis/Upsilons) over allocation-vs-materialization values, and rewires all
    // uses of each allocation to the materialization that reaches them.
    void placeMaterializationPoints()
    {
        m_ssaCalculator.reset();

        // The "variables" are the object allocations that we are sinking. So, nodeToVariable maps
        // sink candidates (aka escapees) to the SSACalculator's notion of Variable, and indexToNode
        // maps in the opposite direction using the SSACalculator::Variable::index() as the key.
        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
        Vector<Node*> indexToNode;

        for (Node* node : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            nodeToVariable.add(node, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(node);
        }

        // Both the original allocation and each of its materializations count as
        // defs of the corresponding variable.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
                    m_ssaCalculator.newDef(variable, block, node);

                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    m_ssaCalculator.newDef(
                        nodeToVariable.get(m_materializationToEscapee.get(materialize)),
                        block, materialize);
                }
            }
        }

        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* allocation = indexToNode[variable->index()];
                // Prune Phis for allocations that aren't live into this block.
                if (!m_combinedLiveness.liveAtHead[block].contains(allocation))
                    return nullptr;

                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
        // materialization. Create the appropriate Upsilon nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // Maps each allocation to the value (materialization or Phi) that
            // currently represents it at this point in the block.
            HashMap<Node*, Node*> mapping;

            // Seed the mapping from the reaching defs at block head.
            for (Node* candidate : m_combinedLiveness.liveAtHead[block]) {
                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
                if (!variable)
                    continue;

                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;

                mapping.set(indexToNode[variable->index()], def->value());
            }

            availabilityCalculator.beginBlock(block);
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                m_insertionSet.insert(0, phiDef->value());

                Node* originalNode = indexToNode[phiDef->variable()->index()];
                // Keep OSR exit state in sync with the new representative value.
                insertOSRHintsForUpdate(
                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
                    originalNode, phiDef->value());

                mapping.set(originalNode, phiDef->value());
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Insert any materializations scheduled just before this node.
                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialize);
                    m_insertionSet.insert(nodeIndex, materialize);
                    insertOSRHintsForUpdate(
                        m_insertionSet, nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialize);
                    mapping.set(escapee, materialize);
                }

                availabilityCalculator.executeNode(node);

                // Redirect this node's uses of the allocation to the current
                // representative value.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (Node* materialize = mapping.get(edge.node()))
                            edge.setNode(materialize);
                    });

                // If you cause an escape, you shouldn't see the original escapee.
                if (validationEnabled()) {
                    handleNode(
                        node,
                        [&] () { },
                        [&] (Node* escapee) {
                            DFG_ASSERT(m_graph, node, !m_sinkCandidates.contains(escapee));
                        });
                }
            }

            // Feed the successors' Phis with Upsilons placed just before the
            // terminal of this block.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            Node* upsilonWhere = terminal.node;
            NodeOrigin upsilonOrigin = upsilonWhere->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* allocation = indexToNode[variable->index()];

                    Node* incoming = mapping.get(allocation);
                    DFG_ASSERT(m_graph, incoming, incoming != allocation);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }

        // At this point we have dummy materialization nodes along with edges to them. This means
        // that the part of the control flow graph that prefers to see actual object allocations
        // is completely fixed up, except for the materializations themselves.
    }
492
    // Converts allocations in m_sinkCandidates into their Phantom forms and
    // rewrites writes to them as PutHints, so that the stores become visible to
    // the SSA-based field promotion that follows.
    void lowerNonReadingOperationsOnPhantomAllocations()
    {
        // Lower everything but reading operations on phantom allocations. We absolutely have to
        // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
        // because the whole point is that those go away completely.

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case PutByOffset: {
                    // child2 is the base object being stored into.
                    Node* target = node->child2().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomObjectAllocation());
                        node->convertToPutByOffsetHint();
                    }
                    break;
                }

                case PutClosureVar: {
                    Node* target = node->child1().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomActivationAllocation());
                        node->convertToPutClosureVarHint();
                    }
                    break;
                }

                case PutStructure: {
                    Node* target = node->child1().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomObjectAllocation());
                        // Note: inserted at nodeIndex (before this node), since
                        // the hint conversion consumes the structure constant.
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex, node->origin, JSValue(node->transition()->next));
                        node->convertToPutStructureHint(structure);
                    }
                    break;
                }

                case NewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        // Record the initial structure as a hint right after the
                        // (now phantom) allocation.
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, JSValue(node->structure()));
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, structure));
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case MaterializeNewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        // varArgChild(0) is the structure; the remaining children
                        // are the property values described by the
                        // materialization data.
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
                        for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                            unsigned identifierNumber =
                                node->objectMaterializationData().m_properties[i].m_identifierNumber;
                            m_insertionSet.insert(
                                nodeIndex + 1,
                                PromotedHeapLocation(
                                    NamedPropertyPLoc, node, identifierNumber).createHint(
                                    m_graph, node->origin,
                                    m_graph.varArgChild(node, i + 1).node()));
                        }
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case NewFunction: {
                    if (m_sinkCandidates.contains(node)) {
                        Node* executable = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, node->cellOperand());
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(FunctionExecutablePLoc, node).createHint(
                                m_graph, node->origin, executable));
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(FunctionActivationPLoc, node).createHint(
                                m_graph, node->origin, node->child1().node()));
                        node->convertToPhantomNewFunction();
                    }
                    break;
                }

                case CreateActivation: {
                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationScopePLoc, node).createHint(
                                m_graph, node->origin, node->child1().node()));
                        Node* symbolTableNode = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, node->cellOperand());
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationSymbolTablePLoc, node).createHint(
                                m_graph, node->origin, symbolTableNode));

                        {
                            // Seed every closure variable slot with undefined,
                            // mirroring the initial state of a fresh activation.
                            SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
                            Node* undefined = m_insertionSet.insertConstant(
                                nodeIndex + 1, node->origin, jsUndefined());
                            ConcurrentJITLocker locker(symbolTable->m_lock);
                            for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
                                m_insertionSet.insert(
                                    nodeIndex + 1,
                                    PromotedHeapLocation(
                                        ClosureVarPLoc, node, iter->value.scopeOffset().offset()).createHint(
                                        m_graph, node->origin, undefined));
                            }
                        }

                        node->convertToPhantomCreateActivation();
                    }
                    break;
                }

                case MaterializeCreateActivation: {
                    if (m_sinkCandidates.contains(node)) {
                        // varArgChild(0) is the scope; remaining children are the
                        // closure-variable values from the materialization data.
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationScopePLoc, node).createHint(
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
                        Node* symbolTableNode = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, node->cellOperand());
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationSymbolTablePLoc, node).createHint(
                                m_graph, node->origin, symbolTableNode));
                        ObjectMaterializationData& data = node->objectMaterializationData();
                        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
                            unsigned identifierNumber = data.m_properties[i].m_identifierNumber;
                            m_insertionSet.insert(
                                nodeIndex + 1,
                                PromotedHeapLocation(
                                    ClosureVarPLoc, node, identifierNumber).createHint(
                                    m_graph, node->origin,
                                    m_graph.varArgChild(node, i + 1).node()));
                        }
                        node->convertToPhantomCreateActivation();
                    }
                    break;
                }

                case StoreBarrier: {
                    // Store barriers are expected to have been handled before
                    // this phase runs.
                    DFG_CRASH(m_graph, node, "Unexpected store barrier during sinking.");
                    break;
                }

                default:
                    break;
                }

                // Any remaining use of a sink candidate is a cell use that no
                // longer needs a check.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (m_sinkCandidates.contains(edge.node()))
                            edge.setUseKind(KnownCellUse);
                    });
            }
            m_insertionSet.execute(block);
        }
    }
661
    // Promotes the fields of sunken allocations to SSA values: gathers all
    // promoted heap locations, seeds them with bottom (undefined) hints, runs
    // the SSA calculator over the stores, and replaces loads with the reaching
    // promoted values.
    void promoteSunkenFields()
    {
        // Collect the set of heap locations that we will be operating over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        // For each sunken thingy, make sure we create Bottom values for all of its fields.
        // Note that this has the hilarious slight inefficiency of creating redundant hints for
        // things that were previously materializations. This should only impact compile times and
        // not code quality, and it's necessary for soundness without some data structure hackage.
        // For example, a MaterializeNewObject that we choose to sink may have new fields added to
        // it conditionally. That would necessitate Bottoms.
        Node* bottom = nullptr;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // The shared bottom constant lives in the root block.
            if (block == m_graph.block(0))
                bottom = m_insertionSet.insertConstant(0, NodeOrigin(), jsUndefined());

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    m_insertionSet.insert(
                        nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
                }
            }
            m_insertionSet.execute(block);
        }

        m_ssaCalculator.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_indexToLocation.clear();
        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(m_indexToLocation.size() == variable->index());
            m_indexToLocation.append(location);
        }

        // Create Defs from the existing hints.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (!m_sinkCandidates.contains(location.base()))
                            return;
                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_ssaCalculator.newDef(variable, block, value.node());
                    },
                    [&] (PromotedHeapLocation) { });
            }
        }

        // OMG run the SSA calculator to create Phis!
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = m_indexToLocation[variable->index()];
                // Prune Phis whose base allocation isn't live into this block.
                if (!m_combinedLiveness.liveAtHead[block].contains(location.base()))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the appropriate Upsilon nodes.
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // This mapping table is intended to be lazy. If something is omitted from the table,
            // it means that there haven't been any local stores to that promoted heap location.
            m_localMapping.clear();

            // Insert the Phi functions that we had previously created.
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];

                m_insertionSet.insert(
                    0, phiDef->value());
                m_insertionSet.insert(
                    0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                m_localMapping.add(location, phiDef->value());
            }

            if (verbose)
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");

            // Process the block and replace all uses of loads with the promoted value.
            for (Node* node : *block) {
                m_graph.performSubstitution(node);

                if (Node* escapee = m_materializationToEscapee.get(node))
                    populateMaterialize(block, node, escapee);

                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        // A store updates the local mapping for its location.
                        if (m_sinkCandidates.contains(location.base()))
                            m_localMapping.set(location, value.node());
                    },
                    [&] (PromotedHeapLocation location) {
                        // A load gets replaced by the value that reaches it.
                        if (m_sinkCandidates.contains(location.base())) {
                            switch (node->op()) {
                            case CheckStructure:
                                node->convertToCheckStructureImmediate(resolve(block, location));
                                break;

                            default:
                                node->replaceWith(resolve(block, location));
                                break;
                            }
                        }
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = m_indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }
819
820 Node* resolve(BasicBlock* block, PromotedHeapLocation location)
821 {
822 if (Node* result = m_localMapping.get(location))
823 return result;
824
825 // This implies that there is no local mapping. Find a non-local mapping.
826 SSACalculator::Def* def = m_ssaCalculator.nonLocalReachingDef(
827 block, m_locationToVariable.get(location));
828 ASSERT(def);
829 ASSERT(def->value());
830 m_localMapping.add(location, def->value());
831 return def->value();
832 }
833
    // Central classifier for the analysis. For the given node, invokes
    // sinkCandidate() if the node is an allocation this phase knows how to sink,
    // and invokes escape(child) for every child through which this node would
    // cause an allocation to escape. Ops not handled explicitly conservatively
    // escape all of their children.
    template<typename SinkCandidateFunctor, typename EscapeFunctor>
    void handleNode(
        Node* node,
        const SinkCandidateFunctor& sinkCandidate,
        const EscapeFunctor& escape)
    {
        switch (node->op()) {
        case NewObject:
        case MaterializeNewObject:
        case MaterializeCreateActivation:
            // Always sinkable; anything flowing into the allocation escapes.
            sinkCandidate();
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;

        case NewFunction:
            // Only sinkable once the singleton function watchpoint has fired;
            // the scope child escapes either way.
            if (!node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid())
                sinkCandidate();
            escape(node->child1().node());
            break;

        case CreateActivation:
            // Same singleton-invalidated requirement as NewFunction.
            if (!node->castOperand<SymbolTable*>()->singletonScope()->isStillValid())
                sinkCandidate();
            escape(node->child1().node());
            break;

        case Check:
            // A check that will be elided, or that only proves object-ness,
            // doesn't force materialization.
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    if (edge.willNotHaveCheck())
                        return;

                    if (alreadyChecked(edge.useKind(), SpecObject))
                        return;

                    escape(edge.node());
                });
            break;

        case MovHint:
        case PutHint:
            // Pure OSR bookkeeping; never an escape.
            break;

        case PutStructure:
        case CheckStructure:
        case GetByOffset:
        case MultiGetByOffset:
        case GetGetterSetterByOffset: {
            // These only escape their base if it isn't a sinkable object
            // allocation.
            Node* target = node->child1().node();
            if (!target->isObjectAllocation())
                escape(target);
            break;
        }

        case PutByOffset: {
            // child2 is the base; child3 (the stored value) always escapes.
            Node* target = node->child2().node();
            if (!target->isObjectAllocation()) {
                escape(target);
                escape(node->child1().node());
            }
            escape(node->child3().node());
            break;
        }

        case GetClosureVar: {
            Node* target = node->child1().node();
            if (!target->isActivationAllocation())
                escape(target);
            break;
        }

        case PutClosureVar: {
            // The stored value (child2) always escapes.
            Node* target = node->child1().node();
            if (!target->isActivationAllocation())
                escape(target);
            escape(node->child2().node());
            break;
        }

        case GetScope: {
            Node* target = node->child1().node();
            if (!target->isFunctionAllocation())
                escape(target);
            break;
        }

        case GetExecutable: {
            Node* target = node->child1().node();
            if (!target->isFunctionAllocation())
                escape(target);
            break;
        }

        case SkipScope: {
            Node* target = node->child1().node();
            if (!target->isActivationAllocation())
                escape(target);
            break;
        }

        case MultiPutByOffset:
            // FIXME: In the future we should be able to handle this. It's just a matter of
            // building the appropriate *Hint variant of this instruction, along with a
            // PhantomStructureSelect node - since this transforms the Structure in a conditional
            // way.
            // https://bugs.webkit.org/show_bug.cgi?id=136924
            escape(node->child1().node());
            escape(node->child2().node());
            break;

        default:
            // Conservative default: every child escapes.
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;
        }
    }
958
959 Node* createMaterialize(Node* escapee, Node* where)
960 {
961 Node* result = nullptr;
962
963 switch (escapee->op()) {
964 case NewObject:
965 case MaterializeNewObject: {
966 ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
967
968 result = m_graph.addNode(
969 escapee->prediction(), Node::VarArg, MaterializeNewObject,
970 NodeOrigin(
971 escapee->origin.semantic,
972 where->origin.forExit),
973 OpInfo(data), OpInfo(), 0, 0);
974 break;
975 }
976
977 case NewFunction:
978 result = m_graph.addNode(
979 escapee->prediction(), NewFunction,
980 NodeOrigin(
981 escapee->origin.semantic,
982 where->origin.forExit),
983 OpInfo(escapee->cellOperand()),
984 escapee->child1());
985 break;
986
987 case CreateActivation:
988 case MaterializeCreateActivation: {
989 ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
990 FrozenValue* symbolTable = escapee->cellOperand();
991 result = m_graph.addNode(
992 escapee->prediction(), Node::VarArg, MaterializeCreateActivation,
993 NodeOrigin(
994 escapee->origin.semantic,
995 where->origin.forExit),
996 OpInfo(data), OpInfo(symbolTable), 0, 0);
997 break;
998 }
999
1000 default:
1001 DFG_CRASH(m_graph, escapee, "Bad escapee op");
1002 break;
1003 }
1004
1005 if (verbose)
1006 dataLog("Creating materialization point at ", where, " for ", escapee, ": ", result, "\n");
1007
1008 m_materializationToEscapee.add(result, escapee);
1009 m_materializationSiteToMaterializations.add(
1010 where, Vector<Node*>()).iterator->value.append(result);
1011
1012 return result;
1013 }
1014
    // Fills in the variable-argument children of a materialization |node| for
    // allocation |escapee|, resolving each promoted heap location to the node
    // holding its value at the current point in |block|. For NewFunction, the
    // node's children are left alone and we only sanity-check the locations.
    void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
    {
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();
            
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            
            PromotedHeapLocation structure(StructurePLoc, escapee);
            ASSERT(locations.contains(structure));
            
            // The structure is always the first vararg child.
            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
            
            for (unsigned i = 0; i < locations.size(); ++i) {
                switch (locations[i].kind()) {
                case StructurePLoc: {
                    // Already appended above.
                    ASSERT(locations[i] == structure);
                    break;
                }
                    
                case NamedPropertyPLoc: {
                    Node* value = resolve(block, locations[i]);
                    if (value->op() == BottomValue) {
                        // We can skip Bottoms entirely.
                        break;
                    }
                    
                    // m_properties and the vararg children must stay in
                    // lockstep: entry i of m_properties describes vararg
                    // child (firstChild + 1 + i).
                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }
                    
                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }
            
            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
            
        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            
            unsigned firstChild = m_graph.m_varArgChildren.size();
            
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            
            PromotedHeapLocation scope(ActivationScopePLoc, escapee);
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, escapee);
            ASSERT(locations.contains(scope));
            
            // The scope is always the first vararg child. The symbol table is
            // not a child (it is tracked but carried elsewhere on the node).
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
            
            for (unsigned i = 0; i < locations.size(); ++i) {
                switch (locations[i].kind()) {
                case ActivationScopePLoc: {
                    // Already appended above.
                    ASSERT(locations[i] == scope);
                    break;
                }
                    
                case ActivationSymbolTablePLoc: {
                    ASSERT(locations[i] == symbolTable);
                    break;
                }
                    
                case ClosureVarPLoc: {
                    Node* value = resolve(block, locations[i]);
                    // Bottoms can be skipped, as in the object case above.
                    if (value->op() == BottomValue)
                        break;
                    
                    // Keep m_properties and the vararg children in lockstep.
                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }
                    
                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }
            
            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
            
        case NewFunction: {
            // Nothing to populate: the NewFunction node keeps its original
            // children. In debug builds, verify that exactly the executable
            // and activation locations are tracked for this allocation.
            if (!ASSERT_DISABLED) {
                Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
                
                ASSERT(locations.size() == 2);
                
                PromotedHeapLocation executable(FunctionExecutablePLoc, escapee);
                ASSERT(locations.contains(executable));
                
                PromotedHeapLocation activation(FunctionActivationPLoc, escapee);
                ASSERT(locations.contains(activation));
                
                for (unsigned i = 0; i < locations.size(); ++i) {
                    switch (locations[i].kind()) {
                    case FunctionExecutablePLoc: {
                        ASSERT(locations[i] == executable);
                        break;
                    }
                        
                    case FunctionActivationPLoc: {
                        ASSERT(locations[i] == activation);
                        break;
                    }
                        
                    default:
                        DFG_CRASH(m_graph, node, "Bad location kind");
                    }
                }
            }
            
            break;
        }
            
        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
            break;
        }
    }
1143
    // NOTE(review): presumably combined node/bytecode liveness used to place
    // materializations — confirm against the parts of the phase not shown here.
    CombinedLiveness m_combinedLiveness;
    // Computes SSA form over the promoted heap locations of sunk allocations;
    // resolve() queries it for reaching defs.
    SSACalculator m_ssaCalculator;
    // Allocations chosen for sinking (populated elsewhere in the phase).
    HashSet<Node*> m_sinkCandidates;
    // Maps each materialization node back to the allocation it recreates.
    HashMap<Node*, Node*> m_materializationToEscapee;
    // All materializations that must be emitted at a given site node.
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    // The promoted heap locations (structure, named properties, closure vars,
    // ...) tracked for each sunk allocation.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
    // Location -> SSACalculator variable, used by resolve() for non-local
    // reaching-def queries; m_indexToLocation is the reverse-direction table.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    Vector<PromotedHeapLocation> m_indexToLocation;
    // Per-block cache: the node currently representing each location.
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    // Batched node insertions, flushed per block via execute().
    InsertionSet m_insertionSet;
1154 };
1155
1156 bool performObjectAllocationSinking(Graph& graph)
1157 {
1158 SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
1159 return runPhase<ObjectAllocationSinkingPhase>(graph);
1160 }
1161
1162 } } // namespace JSC::DFG
1163
1164 #endif // ENABLE(DFG_JIT)
1165