]> git.saurik.com Git - apple/javascriptcore.git/blob - dfg/DFGStoreBarrierInsertionPhase.cpp
JavaScriptCore-7601.1.46.3.tar.gz
[apple/javascriptcore.git] / dfg / DFGStoreBarrierInsertionPhase.cpp
1 /*
2 * Copyright (C) 2015 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23 * THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "DFGStoreBarrierInsertionPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGAbstractInterpreterInlines.h"
32 #include "DFGBlockMapInlines.h"
33 #include "DFGDoesGC.h"
34 #include "DFGGraph.h"
35 #include "DFGInPlaceAbstractState.h"
36 #include "DFGInsertionSet.h"
37 #include "DFGPhase.h"
38 #include "JSCInlines.h"
39 #include <wtf/CommaPrinter.h>
40 #include <wtf/HashSet.h>
41
42 namespace JSC { namespace DFG {
43
44 namespace {
45
// File-local debug switch. constexpr (rather than a mutable namespace-scope bool)
// so that the dead logging paths can be folded away at compile time.
constexpr bool verbose = false;

enum class PhaseMode {
    // Does only a local analysis for store barrier insertion and assumes that pointers live
    // from predecessor blocks may need barriers. Assumes CPS conventions. Does not use AI for
    // eliminating store barriers, but does a best effort to eliminate barriers when you're
    // storing a non-cell value by using Node::result() and by looking at constants. The local
    // analysis is based on GC epochs, so it will eliminate a lot of locally redundant barriers.
    Fast,
    
    // Does a global analysis for store barrier insertion. Reuses the GC-epoch-based analysis
    // used by Fast, but adds a conservative merge rule for propagating information from one
    // block to the next. This will ensure for example that if a value V coming from multiple
    // predecessors in B didn't need any more barriers at the end of each predecessor (either
    // because it was the last allocated object in that predecessor or because it just had a
    // barrier executed), then until we hit another GC point in B, we won't need another barrier
    // on V. Uses AI for eliminating barriers when we know that the value being stored is not a
    // cell. Assumes SSA conventions.
    Global
};
66
// Inserts StoreBarrier nodes ahead of possibly-cell stores. Instantiated once per
// PhaseMode: Fast (local, CPS) and Global (fixpoint over the CFG, SSA).
template<PhaseMode mode>
class StoreBarrierInsertionPhase : public Phase {
public:
    StoreBarrierInsertionPhase(Graph& graph)
        // The phase name depends on the compile-time mode so that dumps distinguish
        // the fast (CPS) and global (SSA) variants.
        : Phase(graph, mode == PhaseMode::Fast ? "fast store barrier insertion" : "global store barrier insertion")
        , m_insertionSet(graph)
    {
    }
75
    // Phase entry point. Dispatches on the compile-time mode:
    // - Fast: a single local pass over every block (requires CPS form).
    // - Global: runs the epoch analysis to a fixpoint over the CFG, then a final
    //   pass that actually inserts barriers (requires SSA form).
    // Always returns true: the phase inserts nodes, so it always changes the graph.
    bool run()
    {
        if (verbose) {
            dataLog("Starting store barrier insertion:\n");
            m_graph.dump();
        }
        
        switch (mode) {
        case PhaseMode::Fast: {
            DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA);
            
            // Local-only analysis: reset epochs once, then visit each block exactly
            // once, inserting barriers as we go.
            m_graph.clearEpochs();
            for (BasicBlock* block : m_graph.blocksInNaturalOrder())
                handleBlock(block);
            return true;
        }
            
        case PhaseMode::Global: {
            DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
            
            m_state = std::make_unique<InPlaceAbstractState>(m_graph);
            m_interpreter = std::make_unique<AbstractInterpreter<InPlaceAbstractState>>(m_graph, *m_state);
            
            m_isConverged = false;
            
            // First run the analysis. Inside basic blocks we use an epoch-based analysis that
            // is very precise. At block boundaries, we just propagate which nodes may need a
            // barrier. This gives us a very nice bottom->top fixpoint: we start out assuming
            // that no node needs any barriers at block boundaries, and then we converge
            // towards believing that all nodes need barriers. "Needing a barrier" is like
            // saying that the node is in a past epoch. "Not needing a barrier" is like saying
            // that the node is in the current epoch.
            m_stateAtHead = std::make_unique<BlockMap<HashSet<Node*>>>(m_graph);
            m_stateAtTail = std::make_unique<BlockMap<HashSet<Node*>>>(m_graph);
            
            BlockList postOrder = m_graph.blocksInPostOrder();
            
            bool changed = true;
            while (changed) {
                changed = false;
                
                // Intentional backwards loop because we are using RPO.
                for (unsigned blockIndex = postOrder.size(); blockIndex--;) {
                    BasicBlock* block = postOrder[blockIndex];
                    
                    if (!handleBlock(block)) {
                        // If the block didn't finish, then it cannot affect the fixpoint.
                        continue;
                    }
                    
                    // Construct the state-at-tail based on the epochs of live nodes and the
                    // current epoch. We grow state-at-tail monotonically to ensure convergence.
                    bool thisBlockChanged = false;
                    for (Node* node : block->ssa->liveAtTail) {
                        if (node->epoch() != m_currentEpoch) {
                            // If the node is older than the current epoch, then we may need to
                            // run a barrier on it in the future. So, add it to the state.
                            thisBlockChanged |= m_stateAtTail->at(block).add(node).isNewEntry;
                        }
                    }
                    
                    if (!thisBlockChanged) {
                        // This iteration didn't learn anything new about this block.
                        continue;
                    }
                    
                    // Changed things. Make sure that we loop one more time.
                    changed = true;
                    
                    // Merge rule: anything that may need a barrier at our tail may need
                    // one at the head of every successor.
                    for (BasicBlock* successor : block->successors()) {
                        for (Node* node : m_stateAtTail->at(block))
                            m_stateAtHead->at(successor).add(node);
                    }
                }
            }
            
            // Tell handleBlock() that it's time to actually insert barriers for real.
            m_isConverged = true;
            
            for (BasicBlock* block : m_graph.blocksInNaturalOrder())
                handleBlock(block);
            
            return true;
        } }
        
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
164
165 private:
    // Runs the epoch analysis over one block and, when reallyInsertBarriers() is true,
    // schedules the StoreBarrier nodes the block needs via m_insertionSet. Returns
    // false only in Global mode when AI shows the block (or its remainder) is not
    // reachable; run()'s fixpoint ignores such blocks.
    bool handleBlock(BasicBlock* block)
    {
        if (verbose) {
            dataLog("Dealing with block ", pointerDump(block), "\n");
            if (reallyInsertBarriers())
                dataLog("    Really inserting barriers.\n");
        }
        
        // Epochs are only ever compared within one block, so each block restarts
        // the numbering.
        m_currentEpoch = Epoch::first();
        
        if (mode == PhaseMode::Global) {
            if (!block->cfaHasVisited)
                return false;
            m_state->beginBasicBlock(block);
            
            // Seed the epochs of values flowing in from predecessors.
            for (Node* node : block->ssa->liveAtHead) {
                if (m_stateAtHead->at(block).contains(node)) {
                    // If previous blocks tell us that this node may need a barrier in the
                    // future, then put it in the ancient primordial epoch. This forces us to
                    // emit a barrier on any possibly-cell store, regardless of the epoch of the
                    // stored value.
                    node->setEpoch(Epoch());
                } else {
                    // If previous blocks aren't requiring us to run a barrier on this node,
                    // then put it in the current epoch. This means that we will skip barriers
                    // on this node so long as we don't allocate. It also means that we won't
                    // run barriers on stores of one such node into another such node. That's
                    // fine, because nodes would be excluded from the state set if at the tails
                    // of all predecessors they always had the current epoch.
                    node->setEpoch(m_currentEpoch);
                }
            }
        }
        
        bool result = true;
        
        for (m_nodeIndex = 0; m_nodeIndex < block->size(); ++m_nodeIndex) {
            m_node = block->at(m_nodeIndex);
            
            if (verbose) {
                dataLog(
                    "    ", m_currentEpoch, ": Looking at node ", m_node, " with children: ");
                CommaPrinter comma;
                m_graph.doToChildren(
                    m_node,
                    [&] (Edge edge) {
                        dataLog(comma, edge, " (", edge->epoch(), ")");
                    });
                dataLog("\n");
            }
            
            if (mode == PhaseMode::Global) {
                // Execute edges separately because we don't want to insert barriers if the
                // operation doing the store does a check that ensures that the child is not
                // a cell.
                m_interpreter->startExecuting();
                m_interpreter->executeEdges(m_node);
            }
            
            // Step 1: if this node stores a possibly-cell value into some base object,
            // consider a barrier for that store.
            switch (m_node->op()) {
            case PutByValDirect:
            case PutByVal:
            case PutByValAlias: {
                switch (m_node->arrayMode().modeForPut().type()) {
                case Array::Contiguous:
                case Array::ArrayStorage:
                case Array::SlowPutArrayStorage: {
                    Edge child1 = m_graph.varArgChild(m_node, 0);
                    Edge child3 = m_graph.varArgChild(m_node, 2);
                    considerBarrier(child1, child3);
                    break;
                }
                default:
                    // Other array modes don't store cells into the base's butterfly
                    // on this path.
                    break;
                }
                break;
            }
                
            case ArrayPush: {
                switch (m_node->arrayMode().type()) {
                case Array::Contiguous:
                case Array::ArrayStorage:
                    considerBarrier(m_node->child1(), m_node->child2());
                    break;
                default:
                    break;
                }
                break;
            }
                
            case PutStructure: {
                // Structures are cells, so this is always a possibly-cell store;
                // there is no stored-value edge to test.
                considerBarrier(m_node->child1());
                break;
            }
                
            case PutClosureVar:
            case PutToArguments:
            case PutById:
            case PutByIdFlush:
            case PutByIdDirect:
            case MultiPutByOffset: {
                considerBarrier(m_node->child1(), m_node->child2());
                break;
            }
                
            case PutByOffset: {
                considerBarrier(m_node->child2(), m_node->child3());
                break;
            }
                
            case PutGlobalVar: {
                considerBarrier(m_node->child1(), m_node->child2());
                break;
            }
                
            default:
                break;
            }
            
            // Step 2: any node that can GC invalidates all "newest object" knowledge,
            // so advance the epoch.
            if (doesGC(m_graph, m_node))
                m_currentEpoch.bump();
            
            // Step 3: assign this node's result an epoch.
            switch (m_node->op()) {
            case NewObject:
            case NewArray:
            case NewArrayWithSize:
            case NewArrayBuffer:
            case NewTypedArray:
            case NewRegexp:
            case MaterializeNewObject:
            case MaterializeCreateActivation:
            case NewStringObject:
            case MakeRope:
            case CreateActivation:
            case CreateDirectArguments:
            case CreateScopedArguments:
            case CreateClonedArguments:
            case NewFunction:
                // Nodes that allocate get to set their epoch because for those nodes we know
                // that they will be the newest object in the heap.
                m_node->setEpoch(m_currentEpoch);
                break;
                
            case AllocatePropertyStorage:
            case ReallocatePropertyStorage:
                // These allocate but then run their own barrier.
                insertBarrier(m_nodeIndex + 1, m_node->child1().node());
                m_node->setEpoch(Epoch());
                break;
                
            case Upsilon:
                // Forward the epoch to the Phi this Upsilon feeds; the Upsilon node
                // itself has no useful result.
                m_node->phi()->setEpoch(m_node->epoch());
                m_node->setEpoch(Epoch());
                break;
                
            default:
                // For nodes that aren't guaranteed to allocate, we say that their return value
                // (if there is one) could be arbitrarily old.
                m_node->setEpoch(Epoch());
                break;
            }
            
            if (verbose) {
                dataLog(
                    "    ", m_currentEpoch, ": Done with node ", m_node, " (", m_node->epoch(),
                    ") with children: ");
                CommaPrinter comma;
                m_graph.doToChildren(
                    m_node,
                    [&] (Edge edge) {
                        dataLog(comma, edge, " (", edge->epoch(), ")");
                    });
                dataLog("\n");
            }
            
            if (mode == PhaseMode::Global) {
                if (!m_interpreter->executeEffects(m_nodeIndex, m_node)) {
                    // AI proved the rest of the block unreachable; stop here.
                    result = false;
                    break;
                }
            }
        }
        
        if (mode == PhaseMode::Global)
            m_state->reset();
        
        if (reallyInsertBarriers())
            m_insertionSet.execute(block);
        
        return result;
    }
357
    // Considers a barrier for a store of |child| into |base|. Rejects stores whose
    // value is provably not a cell, and stores where epoch ordering proves a barrier
    // redundant; otherwise defers to considerBarrier(base).
    void considerBarrier(Edge base, Edge child)
    {
        if (verbose)
            dataLog("    Considering adding barrier ", base, " => ", child, "\n");
        
        // We don't need a store barrier if the child is guaranteed to not be a cell.
        switch (mode) {
        case PhaseMode::Fast: {
            // Don't try too hard because it's too expensive to run AI.
            if (child->hasConstant()) {
                if (!child->asJSValue().isCell()) {
                    if (verbose)
                        dataLog("    Rejecting because of constant type.\n");
                    return;
                }
            } else {
                switch (child->result()) {
                case NodeResultNumber:
                case NodeResultDouble:
                case NodeResultInt32:
                case NodeResultInt52:
                case NodeResultBoolean:
                    if (verbose)
                        dataLog("    Rejecting because of result type.\n");
                    return;
                default:
                    break;
                }
            }
            break;
        }
            
        case PhaseMode::Global: {
            // Go into rage mode to eliminate any chance of a barrier with a non-cell child. We
            // can afford to keep around AI in Global mode.
            if (!m_interpreter->needsTypeCheck(child, ~SpecCell)) {
                if (verbose)
                    dataLog("    Rejecting because of AI type.\n");
                return;
            }
            break;
        } }
        
        // We don't need a store barrier if the base is at least as new as the child. For
        // example this won't need a barrier:
        //
        //     var o = {}
        //     var p = {}
        //     p.f = o
        //
        // This is stronger than the currentEpoch rule in considerBarrier(Edge), because it will
        // also eliminate barriers in cases like this:
        //
        //     var o = {} // o.epoch = 1, currentEpoch = 1
        //     var p = {} // o.epoch = 1, p.epoch = 2, currentEpoch = 2
        //     var q = {} // o.epoch = 1, p.epoch = 2, q.epoch = 3, currentEpoch = 3
        //     p.f = o // p.epoch >= o.epoch
        //
        // This relationship works because if it holds then we are in one of the following
        // scenarios. Note that we don't know *which* of these scenarios we are in, but it's
        // one of them (though without loss of generality, you can replace "a GC happened" with
        // "many GCs happened").
        //
        // 1) There is no GC between the allocation/last-barrier of base, child and now. Then
        //    we definitely don't need a barrier.
        //
        // 2) There was a GC after child was allocated but before base was allocated. Then we
        //    don't need a barrier, because base is still a new object.
        //
        // 3) There was a GC after both child and base were allocated. Then they are both old.
        //    We don't need barriers on stores of old into old. Note that in this case it
        //    doesn't matter if there was also a GC between the allocation of child and base.
        //
        // Note that barriers will lift an object into the current epoch. This is sort of weird.
        // It means that later if you store that object into some other object, and that other
        // object was previously a newer object, you'll think that you need a barrier. We could
        // avoid this by tracking allocation epoch and barrier epoch separately. For now I think
        // that this would be overkill. But this does mean that there are the following
        // possibilities when this relationship holds:
        //
        // 4) Base is allocated first. A GC happens and base becomes old. Then we allocate
        //    child. (Note that alternatively the GC could happen during the allocation of
        //    child.) Then we run a barrier on base. Base will appear to be as new as child
        //    (same epoch). At this point, we don't need another barrier on base.
        //
        // 5) Base is allocated first. Then we allocate child. Then we run a GC. Then we run a
        //    barrier on base. Base will appear newer than child. We don't need a barrier
        //    because both objects are old.
        //
        // Something we watch out for here is that the null epoch is a catch-all for objects
        // allocated before we did any epoch tracking. Two objects being in the null epoch
        // means that we don't know their epoch relationship.
        if (!!base->epoch() && base->epoch() >= child->epoch()) {
            if (verbose)
                dataLog("    Rejecting because of epoch ordering.\n");
            return;
        }
        
        considerBarrier(base);
    }
458
459 void considerBarrier(Edge base)
460 {
461 if (verbose)
462 dataLog(" Considering adding barrier on ", base, "\n");
463
464 // We don't need a store barrier if the epoch of the base is identical to the current
465 // epoch. That means that we either just allocated the object and so it's guaranteed to
466 // be in newgen, or we just ran a barrier on it so it's guaranteed to be remembered
467 // already.
468 if (base->epoch() == m_currentEpoch) {
469 if (verbose)
470 dataLog(" Rejecting because it's in the current epoch.\n");
471 return;
472 }
473
474 if (verbose)
475 dataLog(" Inserting barrier.\n");
476 insertBarrier(m_nodeIndex, base.node());
477 }
478
479 void insertBarrier(unsigned nodeIndex, Node* base)
480 {
481 // If we're in global mode, we should only insert the barriers once we have converged.
482 if (!reallyInsertBarriers())
483 return;
484
485 // FIXME: We could support StoreBarrier(UntypedUse:). That would be sort of cool.
486 // But right now we don't need it.
487 m_insertionSet.insertNode(
488 nodeIndex, SpecNone, StoreBarrier, m_node->origin, Edge(base, CellUse));
489
490 base->setEpoch(m_currentEpoch);
491 }
492
493 bool reallyInsertBarriers()
494 {
495 return mode == PhaseMode::Fast || m_isConverged;
496 }
497
    InsertionSet m_insertionSet; // Pending StoreBarrier nodes, executed per block.
    Epoch m_currentEpoch; // Epoch at the current point in the block; bumped at every node that can GC.
    unsigned m_nodeIndex; // Index within the block of the node being visited.
    Node* m_node; // The node currently being visited by handleBlock().
    
    // Things we only use in Global mode.
    std::unique_ptr<InPlaceAbstractState> m_state; // Backing state for the abstract interpreter.
    std::unique_ptr<AbstractInterpreter<InPlaceAbstractState>> m_interpreter; // Proves stored values non-cell.
    std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtHead; // Nodes that may still need a barrier at each block head.
    std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtTail; // Grown monotonically; drives the fixpoint in run().
    bool m_isConverged; // False during analysis; true during the final barrier-inserting pass.
509 };
510
511 } // anonymous namespace
512
513 bool performFastStoreBarrierInsertion(Graph& graph)
514 {
515 SamplingRegion samplingRegion("DFG Fast Store Barrier Insertion Phase");
516 return runPhase<StoreBarrierInsertionPhase<PhaseMode::Fast>>(graph);
517 }
518
519 bool performGlobalStoreBarrierInsertion(Graph& graph)
520 {
521 SamplingRegion samplingRegion("DFG Global Store Barrier Insertion Phase");
522 return runPhase<StoreBarrierInsertionPhase<PhaseMode::Global>>(graph);
523 }
524
525 } } // namespace JSC::DFG
526
527 #endif // ENABLE(DFG_JIT)
528