/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGCSEPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGAbstractHeap.h"
#include "DFGBlockMapInlines.h"
#include "DFGClobberSet.h"
#include "DFGClobberize.h"
#include "DFGEdgeUsesStructure.h"
#include "DFGGraph.h"
#include "DFGPhase.h"
#include "JSCInlines.h"
#include <array>
#include <wtf/FastBitVector.h>

namespace JSC { namespace DFG {

// This file contains two CSE implementations: local and global. LocalCSE typically runs when we're
// in DFG mode, i.e. we want to compile quickly. LocalCSE contains a lot of optimizations for
// compile time. GlobalCSE, on the other hand, is fairly straightforward. It will find more
// optimization opportunities by virtue of being global.
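//
// Illustrative sketch (not part of the original file): both phases perform the
// same basic rewrite. Given two nodes that compute the same pure value,
//
//     a: ArithAdd(@x, @y)
//     ...
//     b: ArithAdd(@x, @y)
//
// CSE records @a under its PureValue key when @a def()s it; when @b def()s the
// same key, @b is replaced with @a and later uses of @b get rerouted to @a.
// Impure (heap) values work the same way, except that any intervening write()
// to an overlapping abstract heap kills the recorded value.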

namespace {

const bool verbose = false;

class ClobberFilter {
public:
    ClobberFilter(AbstractHeap heap)
        : m_heap(heap)
    {
    }
    
    bool operator()(const ImpureMap::KeyValuePairType& pair) const
    {
        return m_heap.overlaps(pair.key.heap());
    }
    
private:
    AbstractHeap m_heap;
};

inline void clobber(ImpureMap& map, AbstractHeap heap)
{
    ClobberFilter filter(heap);
    map.removeIf(filter);
}

class LocalCSEPhase : public Phase {
public:
    LocalCSEPhase(Graph& graph)
        : Phase(graph, "local common subexpression elimination")
        , m_smallBlock(graph)
        , m_largeBlock(graph)
    {
    }
    
    bool run()
    {
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
        ASSERT(m_graph.m_form == ThreadedCPS || m_graph.m_form == LoadStore);
        
        bool changed = false;
        
        m_graph.clearReplacements();
        
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            
            if (block->size() <= SmallMaps::capacity)
                changed |= m_smallBlock.run(block);
            else
                changed |= m_largeBlock.run(block);
        }
        
        return changed;
    }
    
private:
    class SmallMaps {
    public:
        // This permits SmallMaps to be used for blocks that have up to 100 nodes. In practice,
        // fewer than half of the nodes in a block have pure defs, and even fewer have impure defs.
        // Thus, a capacity limit of 100 probably means that somewhere around ~40 things may end up
        // in one of these "small" list-based maps. That number still seems largeish, except that
        // the overhead of HashMaps can be quite high currently: clearing them, or even removing
        // enough things from them, deletes (or resizes) their backing store eagerly. Hence
        // HashMaps induce a lot of malloc traffic.
        static const unsigned capacity = 100;
        
        SmallMaps()
            : m_pureLength(0)
            , m_impureLength(0)
        {
        }
        
        void clear()
        {
            m_pureLength = 0;
            m_impureLength = 0;
        }
        
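        // Kills every impure entry whose heap location overlaps the written
        // heap. Note the removal idiom below: slot i is overwritten with the
        // last live entry, the length shrinks, and slot i is revisited. Order
        // is not preserved, which is fine for a set-like scan.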
        void write(AbstractHeap heap)
        {
            for (unsigned i = 0; i < m_impureLength; ++i) {
                if (heap.overlaps(m_impureMap[i].key.heap()))
                    m_impureMap[i--] = m_impureMap[--m_impureLength];
            }
        }
        
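        // Returns the previously recorded node for this pure value if there is
        // one; otherwise records the given node and returns nullptr.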
        Node* addPure(PureValue value, Node* node)
        {
            for (unsigned i = m_pureLength; i--;) {
                if (m_pureMap[i].key == value)
                    return m_pureMap[i].value;
            }
            
            ASSERT(m_pureLength < capacity);
            m_pureMap[m_pureLength++] = WTF::KeyValuePair<PureValue, Node*>(value, node);
            return nullptr;
        }
        
        LazyNode findReplacement(HeapLocation location)
        {
            for (unsigned i = m_impureLength; i--;) {
                if (m_impureMap[i].key == location)
                    return m_impureMap[i].value;
            }
            return nullptr;
        }
        
        LazyNode addImpure(HeapLocation location, LazyNode node)
        {
            // FIXME: If we are using small maps, we must not def() derived values.
            // For now the only derived values we def() are constant-based.
            if (location.index() && !location.index().isNode())
                return nullptr;
            if (LazyNode result = findReplacement(location))
                return result;
            ASSERT(m_impureLength < capacity);
            m_impureMap[m_impureLength++] = WTF::KeyValuePair<HeapLocation, LazyNode>(location, node);
            return nullptr;
        }
        
    private:
        WTF::KeyValuePair<PureValue, Node*> m_pureMap[capacity];
        WTF::KeyValuePair<HeapLocation, LazyNode> m_impureMap[capacity];
        unsigned m_pureLength;
        unsigned m_impureLength;
    };
    
    class LargeMaps {
    public:
        LargeMaps()
        {
        }
        
        void clear()
        {
            m_pureMap.clear();
            m_impureMap.clear();
        }
        
        void write(AbstractHeap heap)
        {
            clobber(m_impureMap, heap);
        }
        
        Node* addPure(PureValue value, Node* node)
        {
            auto result = m_pureMap.add(value, node);
            if (result.isNewEntry)
                return nullptr;
            return result.iterator->value;
        }
        
        LazyNode findReplacement(HeapLocation location)
        {
            return m_impureMap.get(location);
        }
        
        LazyNode addImpure(HeapLocation location, LazyNode node)
        {
            auto result = m_impureMap.add(location, node);
            if (result.isNewEntry)
                return nullptr;
            return result.iterator->value;
        }
        
    private:
        HashMap<PureValue, Node*> m_pureMap;
        HashMap<HeapLocation, LazyNode> m_impureMap;
    };
    
    template<typename Maps>
    class BlockCSE {
    public:
        BlockCSE(Graph& graph)
            : m_graph(graph)
            , m_insertionSet(graph)
        {
        }
        
        bool run(BasicBlock* block)
        {
            m_maps.clear();
            m_changed = false;
            m_block = block;
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                m_node = block->at(nodeIndex);
                m_graph.performSubstitution(m_node);
                
                if (m_node->op() == Identity) {
                    m_node->replaceWith(m_node->child1().node());
                    m_changed = true;
                } else {
                    // This rule only makes sense for local CSE, since in SSA form we have already
                    // factored the bounds check out of the PutByVal. It's kind of gross, but we
                    // still have reason to believe that PutByValAlias is a good optimization and
                    // that it's better to do it with a single node rather than separating out the
                    // CheckInBounds.
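                    //
                    // Illustrative sketch (not from the original comment): for
                    // in-bounds stores to the same slot,
                    //
                    //     a: PutByVal(@base, @index, @v1)
                    //     b: PutByVal(@base, @index, @v2)
                    //
                    // @a def()s the indexed property location below, so @b
                    // finds a replacement for that location and is downgraded
                    // to PutByValAlias, which can skip the checks that @a
                    // already performed.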
                    if (m_node->op() == PutByVal || m_node->op() == PutByValDirect) {
                        HeapLocation heap;
                        
                        Node* base = m_graph.varArgChild(m_node, 0).node();
                        Node* index = m_graph.varArgChild(m_node, 1).node();
                        
                        ArrayMode mode = m_node->arrayMode();
                        switch (mode.type()) {
                        case Array::Int32:
                            if (!mode.isInBounds())
                                break;
                            heap = HeapLocation(
                                IndexedPropertyLoc, IndexedInt32Properties, base, index);
                            break;
                            
                        case Array::Double:
                            if (!mode.isInBounds())
                                break;
                            heap = HeapLocation(
                                IndexedPropertyLoc, IndexedDoubleProperties, base, index);
                            break;
                            
                        case Array::Contiguous:
                            if (!mode.isInBounds())
                                break;
                            heap = HeapLocation(
                                IndexedPropertyLoc, IndexedContiguousProperties, base, index);
                            break;
                            
                        case Array::Int8Array:
                        case Array::Int16Array:
                        case Array::Int32Array:
                        case Array::Uint8Array:
                        case Array::Uint8ClampedArray:
                        case Array::Uint16Array:
                        case Array::Uint32Array:
                        case Array::Float32Array:
                        case Array::Float64Array:
                            if (!mode.isInBounds())
                                break;
                            heap = HeapLocation(
                                IndexedPropertyLoc, TypedArrayProperties, base, index);
                            break;
                            
                        default:
                            break;
                        }
                        
                        if (!!heap && m_maps.findReplacement(heap))
                            m_node->setOp(PutByValAlias);
                    }
                    
                    clobberize(m_graph, m_node, *this);
                }
            }
            
            m_insertionSet.execute(block);
            
            return m_changed;
        }
        
        void read(AbstractHeap) { }
        
        void write(AbstractHeap heap)
        {
            m_maps.write(heap);
        }
        
        void def(PureValue value)
        {
            Node* match = m_maps.addPure(value, m_node);
            if (!match)
                return;
            
            m_node->replaceWith(match);
            m_changed = true;
        }
        
        void def(HeapLocation location, LazyNode value)
        {
            LazyNode match = m_maps.addImpure(location, value);
            if (!match)
                return;
            
            if (m_node->op() == GetLocal) {
                // Usually the CPS rethreading phase does this. But it's OK for us to mess with
                // locals so long as:
                //
                // - We dethread the graph. Any changes we make may invalidate the assumptions of
                //   our CPS form, particularly if this GetLocal is linked to the variablesAtTail.
                //
                // - We don't introduce a Phantom for the child of the GetLocal. This wouldn't be
                //   totally wrong but it would pessimize the code. Just because there is a
                //   GetLocal doesn't mean that the child was live. Simply rerouting all uses of
                //   this GetLocal will preserve the live-at-exit information just fine.
                //
                // We accomplish the latter by just clearing the child; then the Phantom that we
                // introduce won't have children and so it will eventually just be deleted.
                
                m_node->child1() = Edge();
                m_graph.dethread();
            }
            
            if (value.isNode() && value.asNode() == m_node) {
                match.ensureIsNode(m_insertionSet, m_block, 0)->owner = m_block;
                ASSERT(match.isNode());
                m_node->replaceWith(match.asNode());
                m_changed = true;
            }
        }
        
    private:
        Graph& m_graph;
        
        bool m_changed;
        Node* m_node;
        BasicBlock* m_block;
        
        Maps m_maps;
        
        InsertionSet m_insertionSet;
    };
    
    BlockCSE<SmallMaps> m_smallBlock;
    BlockCSE<LargeMaps> m_largeBlock;
};

class GlobalCSEPhase : public Phase {
public:
    GlobalCSEPhase(Graph& graph)
        : Phase(graph, "global common subexpression elimination")
        , m_impureDataMap(graph)
        , m_insertionSet(graph)
    {
    }
    
    bool run()
    {
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
        ASSERT(m_graph.m_form == SSA);
        
        m_graph.initializeNodeOwners();
        m_graph.m_dominators.computeIfNecessary(m_graph);
        
        m_preOrder = m_graph.blocksInPreOrder();
        
        // First figure out what gets clobbered by blocks. Note that this uses the preOrder list
        // for convenience only.
        for (unsigned i = m_preOrder.size(); i--;) {
            m_block = m_preOrder[i];
            m_impureData = &m_impureDataMap[m_block];
            for (unsigned nodeIndex = m_block->size(); nodeIndex--;)
                addWrites(m_graph, m_block->at(nodeIndex), m_impureData->writes);
        }
        
        // Based on my experience doing this before, what follows might have to be made iterative.
        // Right now it doesn't have to be iterative because everything is dominator-based. But when
        // validation is enabled, we check if iterating would find new CSE opportunities.
        
        bool changed = iterate();
        
        // FIXME: It should be possible to assert that CSE will not find any new opportunities if you
        // run it a second time. Unfortunately, we cannot assert this right now. Note that if we did
        // this, we'd have to first reset all of our state.
        // https://bugs.webkit.org/show_bug.cgi?id=145853
        
        return changed;
    }
    
    bool iterate()
    {
        if (verbose)
            dataLog("Performing iteration.\n");
        
        m_changed = false;
        m_graph.clearReplacements();
        
        for (unsigned i = 0; i < m_preOrder.size(); ++i) {
            m_block = m_preOrder[i];
            m_impureData = &m_impureDataMap[m_block];
            m_writesSoFar.clear();
            
            if (verbose)
                dataLog("Processing block ", *m_block, ":\n");
            
            for (unsigned nodeIndex = 0; nodeIndex < m_block->size(); ++nodeIndex) {
                m_nodeIndex = nodeIndex;
                m_node = m_block->at(nodeIndex);
                if (verbose)
                    dataLog("  Looking at node ", m_node, ":\n");
                
                m_graph.performSubstitution(m_node);
                
                if (m_node->op() == Identity) {
                    m_node->replaceWith(m_node->child1().node());
                    m_changed = true;
                } else
                    clobberize(m_graph, m_node, *this);
            }
            
            m_insertionSet.execute(m_block);
            
            m_impureData->didVisit = true;
        }
        
        return m_changed;
    }
    
    void read(AbstractHeap) { }
    
    void write(AbstractHeap heap)
    {
        clobber(m_impureData->availableAtTail, heap);
        m_writesSoFar.add(heap);
        if (verbose)
            dataLog("    Clobbered, new tail map: ", mapDump(m_impureData->availableAtTail), "\n");
    }
    
    void def(PureValue value)
    {
        // With pure values we do not have to worry about the possibility of some control flow path
        // clobbering the value. So, we just search for all of the like values that have been
        // computed. We pick one that is in a block that dominates ours. Note that this means that
        // a PureValue will map to a list of nodes, since there may be many places in the control
        // flow graph that compute a value but only one of them that dominates us. We may build up
        // a large list of nodes that compute some value in the case of gnarly control flow. This
        // is probably OK.
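        //
        // Illustrative sketch (not from the original comment): if sibling
        // blocks B and C both compute @x + @y, and our block D is dominated by
        // neither, the PureValue's list ends up holding a node from each block
        // plus ours; a later block that B does dominate would then match B's
        // node from that list.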
        
        auto result = m_pureValues.add(value, Vector<Node*>());
        if (result.isNewEntry) {
            result.iterator->value.append(m_node);
            return;
        }
        
        for (unsigned i = result.iterator->value.size(); i--;) {
            Node* candidate = result.iterator->value[i];
            if (m_graph.m_dominators.dominates(candidate->owner, m_block)) {
                m_node->replaceWith(candidate);
                m_changed = true;
                return;
            }
        }
        
        result.iterator->value.append(m_node);
    }
    
    LazyNode findReplacement(HeapLocation location)
    {
        // At this instant, our "availableAtTail" reflects the set of things that are available in
        // this block so far. We check this map to find block-local CSE opportunities before doing
        // a global search.
        LazyNode match = m_impureData->availableAtTail.get(location);
        if (!!match) {
            if (verbose)
                dataLog("      Found local match: ", match, "\n");
            return match;
        }
        
        // If it's not available at this point in the block, and at some prior point in the block
        // we have clobbered this heap location, then there is no point in doing a global search.
        if (m_writesSoFar.overlaps(location.heap())) {
            if (verbose)
                dataLog("      Not looking globally because of local clobber: ", m_writesSoFar, "\n");
            return nullptr;
        }
        
        // This performs a backward search over the control flow graph to find some possible
        // non-local def() that matches our heap location. Such a match is only valid if there does
        // not exist any path from that def() to our block that contains a write() that overlaps
        // our heap. This algorithm looks for both of these things (the matching def and the
        // overlapping writes) in one backwards DFS pass.
        //
        // This starts by looking at the starting block's predecessors, and then it continues along
        // their predecessors. As soon as this finds a possible def() - one that defines the heap
        // location we want while dominating our starting block - it assumes that this one must be
        // the match. It then lets the DFS over predecessors complete, but it doesn't add the
        // def()'s predecessors; this ensures that any blocks we visit thereafter are on some path
        // from the def() to us. As soon as the DFS finds a write() that overlaps the location's
        // heap, it stops, assuming that there is no possible match. Note that the write() case may
        // trigger before we find a def(), or after. Either way, the write() case causes this
        // function to immediately return nullptr.
        //
        // If the write() is found before we find the def(), then we know that any def() we would
        // find would have a path to us that trips over the write() and hence becomes invalid. This
        // is just a direct outcome of us looking for a def() that dominates us. Given a block A
        // that dominates block B - so that A is the one that would have the def() and B is our
        // starting block - we know that any other block must either be on the path from A to B, or
        // it must be on a path from the root to A, but not both. So, if we haven't found A yet but
        // we already have found a block C that has a write(), then C must be on some path from A
        // to B, which means that A's def() is invalid for our purposes. Hence, before we find the
        // def(), stopping on write() is the right thing to do.
        //
        // Stopping on write() is also the right thing to do after we find the def(). After we find
        // the def(), we don't add that block's predecessors to the search worklist. That means
        // that henceforth the only blocks we will see in the search are blocks on the path from
        // the def() to us. If any such block has a write() that clobbers our heap then we should
        // give up.
        //
        // Hence this graph search algorithm ends up being deceptively simple: any overlapping
        // write() causes us to immediately return nullptr, and a matching def() means that we just
        // record it and neglect to visit its predecessors.
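        //
        // Illustrative sketch (added; not in the original comment): in the CFG
        //
        //       A   <- def()s our location, strictly dominates D
        //      / \
        //     B   C <- C write()s a heap overlapping our location
        //      \ /
        //       D   <- our starting block
        //
        // the backward DFS from D visits B and C, whether or not it has reached
        // A yet; either way, C's overlapping write() forces a nullptr return,
        // because the path A -> C -> D would observe the clobbered value.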
        
        Vector<BasicBlock*, 8> worklist;
        Vector<BasicBlock*, 8> seenList;
        BitVector seen;
        
        for (unsigned i = m_block->predecessors.size(); i--;) {
            BasicBlock* predecessor = m_block->predecessors[i];
            if (!seen.get(predecessor->index)) {
                worklist.append(predecessor);
                seen.set(predecessor->index);
            }
        }
        
        while (!worklist.isEmpty()) {
            BasicBlock* block = worklist.takeLast();
            seenList.append(block);
            
            if (verbose)
                dataLog("      Searching in block ", *block, "\n");
            ImpureBlockData& data = m_impureDataMap[block];
            
            // We require strict domination because this would only see things in our own block if
            // they came *after* our position in the block. Clearly, while our block dominates
            // itself, the things in the block after us don't dominate us.
            if (m_graph.m_dominators.strictlyDominates(block, m_block)) {
                if (verbose)
                    dataLog("        It strictly dominates.\n");
                DFG_ASSERT(m_graph, m_node, data.didVisit);
                DFG_ASSERT(m_graph, m_node, !match);
                if (verbose)
                    dataLog("        Availability map: ", mapDump(data.availableAtTail), "\n");
                match = data.availableAtTail.get(location);
                if (verbose)
                    dataLog("        Availability: ", match, "\n");
                if (!!match) {
                    // Don't examine the predecessors of a match. At this point we just want to
                    // establish that other blocks on the path from here to there don't clobber
                    // the location we're interested in.
                    continue;
                }
            }
            
            if (verbose)
                dataLog("        Dealing with write set ", data.writes, "\n");
            if (data.writes.overlaps(location.heap())) {
                if (verbose)
                    dataLog("        Clobbered.\n");
                return nullptr;
            }
            
            for (unsigned i = block->predecessors.size(); i--;) {
                BasicBlock* predecessor = block->predecessors[i];
                if (!seen.get(predecessor->index)) {
                    worklist.append(predecessor);
                    seen.set(predecessor->index);
                }
            }
        }
        
        if (!match)
            return nullptr;
        
        // Cache the results for next time. We cache them both for this block and for all of our
        // predecessors, since even though we've already visited our predecessors, our predecessors
        // probably have successors other than us.
        // FIXME: Consider caching failed searches as well, when match is null. It's not clear that
        // the reduction in compile time would warrant the increase in complexity, though.
        // https://bugs.webkit.org/show_bug.cgi?id=134876
        for (BasicBlock* block : seenList)
            m_impureDataMap[block].availableAtTail.add(location, match);
        m_impureData->availableAtTail.add(location, match);
        
        return match;
    }
    
    void def(HeapLocation location, LazyNode value)
    {
        if (verbose)
            dataLog("    Got heap location def: ", location, " -> ", value, "\n");
        
        LazyNode match = findReplacement(location);
        
        if (verbose)
            dataLog("    Got match: ", match, "\n");
        
        if (!match) {
            if (verbose)
                dataLog("    Adding at-tail mapping: ", location, " -> ", value, "\n");
            auto result = m_impureData->availableAtTail.add(location, value);
            ASSERT_UNUSED(result, result.isNewEntry);
            return;
        }
        
        if (value.isNode() && value.asNode() == m_node) {
            if (!match.isNode()) {
                // We need to properly record the constant in order to use an existing one if applicable.
                // This ensures that re-running GCSE will not find new optimizations.
                match.ensureIsNode(m_insertionSet, m_block, m_nodeIndex)->owner = m_block;
                auto result = m_pureValues.add(PureValue(match.asNode(), match->constant()), Vector<Node*>());
                bool replaced = false;
                if (!result.isNewEntry) {
                    for (unsigned i = result.iterator->value.size(); i--;) {
                        Node* candidate = result.iterator->value[i];
                        if (m_graph.m_dominators.dominates(candidate->owner, m_block)) {
                            ASSERT(candidate);
                            match->replaceWith(candidate);
                            match.setNode(candidate);
                            replaced = true;
                            break;
                        }
                    }
                }
                if (!replaced)
                    result.iterator->value.append(match.asNode());
            }
            ASSERT(match.asNode());
            m_node->replaceWith(match.asNode());
            m_changed = true;
        }
    }
    
    struct ImpureBlockData {
        ImpureBlockData()
            : didVisit(false)
        {
        }
        
        ClobberSet writes;
        ImpureMap availableAtTail;
        bool didVisit;
    };
    
    Vector<BasicBlock*> m_preOrder;
    
    PureMultiMap m_pureValues;
    BlockMap<ImpureBlockData> m_impureDataMap;
    
    BasicBlock* m_block;
    Node* m_node;
    unsigned m_nodeIndex;
    ImpureBlockData* m_impureData;
    ClobberSet m_writesSoFar;
    InsertionSet m_insertionSet;
    
    bool m_changed;
};

} // anonymous namespace

bool performLocalCSE(Graph& graph)
{
    SamplingRegion samplingRegion("DFG LocalCSE Phase");
    return runPhase<LocalCSEPhase>(graph);
}

bool performGlobalCSE(Graph& graph)
{
    SamplingRegion samplingRegion("DFG GlobalCSE Phase");
    return runPhase<GlobalCSEPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)