]>
Commit | Line | Data |
---|---|---|
ed1e77d3 A |
1 | /* |
2 | * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. | |
3 | * | |
4 | * Redistribution and use in source and binary forms, with or without | |
5 | * modification, are permitted provided that the following conditions | |
6 | * are met: | |
7 | * 1. Redistributions of source code must retain the above copyright | |
8 | * notice, this list of conditions and the following disclaimer. | |
9 | * 2. Redistributions in binary form must reproduce the above copyright | |
10 | * notice, this list of conditions and the following disclaimer in the | |
11 | * documentation and/or other materials provided with the distribution. | |
12 | * | |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
24 | */ | |
25 | ||
26 | #include "config.h" | |
27 | #include "DFGPutStackSinkingPhase.h" | |
28 | ||
29 | #if ENABLE(DFG_JIT) | |
30 | ||
31 | #include "DFGBlockMapInlines.h" | |
32 | #include "DFGGraph.h" | |
33 | #include "DFGInsertionSet.h" | |
34 | #include "DFGPhase.h" | |
35 | #include "DFGPreciseLocalClobberize.h" | |
36 | #include "DFGSSACalculator.h" | |
37 | #include "DFGValidate.h" | |
38 | #include "JSCInlines.h" | |
39 | #include "OperandsInlines.h" | |
40 | ||
41 | namespace JSC { namespace DFG { | |
42 | ||
43 | namespace { | |
44 | ||
// Debug-logging switch for this phase. Flip to true to dump the graph before and
// after sinking, plus per-node liveness/deferral state. Declared const because it
// is only ever read; this prevents accidental mutation and lets the compiler
// eliminate the dead "if (verbose)" branches entirely.
const bool verbose = false;
46 | ||
// Sinks PutStack nodes: instead of keeping a stack store at every point where the
// original code had a PutStack, the phase defers each store and only materializes a
// PutStack right before an operation that could actually observe the stack slot (an
// escape). It runs two dataflow fixpoints -- a backward liveness analysis over stack
// operands and a forward "deferred flush" analysis -- then uses SSACalculator to
// build the Phi/Upsilon graph needed to carry the deferred values across control-flow
// merges. The original PutStacks are finally removed (preserving their type checks)
// and preceded by KillStacks.
class PutStackSinkingPhase : public Phase {
public:
    PutStackSinkingPhase(Graph& graph)
        : Phase(graph, "PutStack sinking")
    {
    }
    
    // Performs the sinking transformation on m_graph. Always returns true.
    bool run()
    {
        // FIXME: One of the problems of this approach is that it will create a duplicate Phi graph
        // for sunken PutStacks in the presence of interesting control flow merges, and where the
        // value being PutStack'd is also otherwise live in the DFG code. We could work around this
        // by doing the sinking over CPS, or maybe just by doing really smart hoisting. It's also
        // possible that the duplicate Phi graph can be deduplicated by LLVM. It would be best if we
        // could observe that there is already a Phi graph in place that does what we want. In
        // principle if we have a request to place a Phi at a particular place, we could just check
        // if there is already a Phi that does what we want. Because PutStackSinkingPhase runs just
        // after SSA conversion, we have almost a guarantee that the Phi graph we produce here would
        // be trivially redundant to the one we already have.
        
        // FIXME: This phase doesn't adequately use KillStacks. KillStack can be viewed as a def.
        // This is mostly inconsequential; it would be a bug to have a local live at a KillStack.
        // More important is that KillStack should swallow any deferral. After a KillStack, the
        // local should behave like a TOP deferral because it would be invalid for anyone to trust
        // the stack. It's not clear to me if this is important or not.
        // https://bugs.webkit.org/show_bug.cgi?id=145296
        
        if (verbose) {
            dataLog("Graph before PutStack sinking:\n");
            m_graph.dump();
        }
        
        SSACalculator ssaCalculator(m_graph);
        InsertionSet insertionSet(m_graph);
        
        // First figure out where various locals are live.
        BlockMap<Operands<bool>> liveAtHead(m_graph);
        BlockMap<Operands<bool>> liveAtTail(m_graph);
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            liveAtHead[block] = Operands<bool>(OperandsLike, block->variablesAtHead);
            liveAtTail[block] = Operands<bool>(OperandsLike, block->variablesAtHead);
            
            liveAtHead[block].fill(false);
            liveAtTail[block].fill(false);
        }
        
        // Backward liveness fixpoint: walk blocks and nodes in reverse, treating reads
        // and escapes as uses and PutStacks as kills, and merge each block's
        // liveAtHead into all of its predecessors' liveAtTail until nothing changes.
        bool changed;
        do {
            changed = false;
            
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                
                Operands<bool> live = liveAtTail[block];
                for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                    Node* node = block->at(nodeIndex);
                    if (verbose)
                        dataLog("Live at ", node, ": ", live, "\n");
                    
                    // An escape makes the operand live; header slots are not tracked.
                    auto escapeHandler = [&] (VirtualRegister operand) {
                        if (operand.isHeader())
                            return;
                        if (verbose)
                            dataLog(" ", operand, " is live at ", node, "\n");
                        live.operand(operand) = true;
                    };
                    
                    // FIXME: This might mishandle LoadVarargs and ForwardVarargs. It might make us
                    // think that the locals being written are stack-live here. They aren't. This
                    // should be harmless since we overwrite them anyway, but still, it's sloppy.
                    // https://bugs.webkit.org/show_bug.cgi?id=145295
                    preciseLocalClobberize(
                        m_graph, node, escapeHandler, escapeHandler,
                        [&] (VirtualRegister operand, LazyNode source) {
                            RELEASE_ASSERT(source.isNode());
                            
                            if (source.asNode() == node) {
                                // This is a load. Ignore it.
                                return;
                            }
                            
                            // A store (PutStack) kills liveness for the operand.
                            RELEASE_ASSERT(node->op() == PutStack);
                            live.operand(operand) = false;
                        });
                }
                
                if (live == liveAtHead[block])
                    continue;
                
                liveAtHead[block] = live;
                changed = true;
                
                for (BasicBlock* predecessor : block->predecessors) {
                    for (size_t i = live.size(); i--;)
                        liveAtTail[predecessor][i] |= live[i];
                }
            }
            
        } while (changed);
        
        // All of the arguments should be live at head of root. Note that we may find that some
        // locals are live at head of root. This seems wrong but isn't. This will happen for example
        // if the function accesses closure variable #42 for some other function and we either don't
        // have variable #42 at all or we haven't set it at root, for whatever reason. Basically this
        // arises since our aliasing for closure variables is conservatively based on variable number
        // and ignores the owning symbol table. We should probably fix this eventually and make our
        // aliasing more precise.
        //
        // For our purposes here, the imprecision in the aliasing is harmless. It just means that we
        // may not do as much Phi pruning as we wanted.
        for (size_t i = liveAtHead.atIndex(0).numberOfArguments(); i--;)
            DFG_ASSERT(m_graph, nullptr, liveAtHead.atIndex(0).argument(i));
        
        // Next identify where we would want to sink PutStacks to. We say that there is a deferred
        // flush if we had a PutStack with a given FlushFormat but it hasn't been materialized yet.
        // Deferrals have the following lattice; but it's worth noting that the TOP part of the
        // lattice serves an entirely different purpose than the rest of the lattice: it just means
        // that we're in a region of code where nobody should have been relying on the value. The
        // rest of the lattice means that we either have a PutStack that is deferred (i.e. still
        // needs to be executed) or there isn't one (because we've already executed it).
        //
        // Bottom:
        //     Represented as DeadFlush.
        //     Means that all previous PutStacks have been executed so there is nothing deferred.
        //     During merging this is subordinate to the other kinds of deferrals, because it
        //     represents the fact that we've already executed all necessary PutStacks. This implies
        //     that there *had* been some PutStacks that we should have executed.
        //
        // Top:
        //     Represented as ConflictingFlush.
        //     Represents the fact that we know, via forward flow, that there isn't any value in the
        //     given local that anyone should have been relying on. This comes into play at the
        //     prologue (because in SSA form at the prologue no local has any value) or when we merge
        //     deferrals for different formats. A lexical scope in which a local had some semantic
        //     meaning will by this point share the same format; if we had stores from different
        //     lexical scopes that got merged together then we may have a conflicting format. Hence
        //     a conflicting format proves that we're no longer in an area in which the variable was
        //     in scope. Note that this is all approximate and only precise enough to later answer
        //     questions pertinent to sinking. For example, this doesn't always detect when a local
        //     is no longer semantically relevant - we may well have a deferral from inside some
        //     inlined call survive outside of that inlined code, and this is generally OK. In the
        //     worst case it means that we might think that a deferral that is actually dead must
        //     still be executed. But we usually catch that with liveness. Liveness usually catches
        //     such cases, but that's not guaranteed since liveness is conservative.
        //
        //     What Top does give us is that it detects situations where we both don't need to care
        //     about a deferral and there is no way that we could reason about it anyway. If we
        //     merged deferrals for different formats then we wouldn't know the format to use. So,
        //     we use Top in that case because that's also a case where we know that we can ignore
        //     the deferral.
        //
        // Deferral with a concrete format:
        //     Represented by format values other than DeadFlush or ConflictingFlush.
        //     Represents the fact that the original code would have done a PutStack but we haven't
        //     identified an operation that would have observed that PutStack.
        //
        // This code has some interesting quirks because of the fact that neither liveness nor
        // deferrals are very precise. They are only precise enough to be able to correctly tell us
        // when we may [sic] need to execute PutStacks. This means that they may report the need to
        // execute a PutStack in cases where we actually don't really need it, and that's totally OK.
        BlockMap<Operands<FlushFormat>> deferredAtHead(m_graph);
        BlockMap<Operands<FlushFormat>> deferredAtTail(m_graph);
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            deferredAtHead[block] =
                Operands<FlushFormat>(OperandsLike, block->variablesAtHead);
            deferredAtTail[block] =
                Operands<FlushFormat>(OperandsLike, block->variablesAtHead);
        }
        
        // At the prologue nobody should be relying on any stack value, so the root
        // block starts at Top.
        deferredAtHead.atIndex(0).fill(ConflictingFlush);
        
        // Forward deferral fixpoint: walk nodes in order, recording a concrete deferral
        // at each PutStack and clearing it (DeadFlush) at each escape, then merge each
        // block's deferredAtTail into its successors' deferredAtHead until stable.
        do {
            changed = false;
            
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                Operands<FlushFormat> deferred = deferredAtHead[block];
                
                for (Node* node : *block) {
                    if (verbose)
                        dataLog("Deferred at ", node, ":", deferred, "\n");
                    
                    if (node->op() == GetStack) {
                        // A GetStack doesn't affect anything, since we know which local we are reading
                        // from.
                        continue;
                    }
                    
                    auto escapeHandler = [&] (VirtualRegister operand) {
                        if (operand.isHeader())
                            return;
                        // We will materialize just before any reads.
                        deferred.operand(operand) = DeadFlush;
                    };
                    
                    preciseLocalClobberize(
                        m_graph, node, escapeHandler, escapeHandler,
                        [&] (VirtualRegister operand, LazyNode source) {
                            RELEASE_ASSERT(source.isNode());
                            
                            if (source.asNode() == node) {
                                // This is a load. Ignore it.
                                return;
                            }
                            
                            // A PutStack creates (or replaces) a deferral in its format.
                            deferred.operand(operand) = node->stackAccessData()->format;
                        });
                }
                
                if (deferred == deferredAtTail[block])
                    continue;
                
                deferredAtTail[block] = deferred;
                changed = true;
                
                for (BasicBlock* successor : block->successors()) {
                    for (size_t i = deferred.size(); i--;) {
                        if (verbose)
                            dataLog("Considering ", VirtualRegister(deferred.operandForIndex(i)), " at ", pointerDump(block), "->", pointerDump(successor), ": ", deferred[i], " and ", deferredAtHead[successor][i], " merges to ");
                        
                        deferredAtHead[successor][i] =
                            merge(deferredAtHead[successor][i], deferred[i]);
                        
                        if (verbose)
                            dataLog(deferredAtHead[successor][i], "\n");
                    }
                }
            }
            
        } while (changed);
        
        // We wish to insert PutStacks at all of the materialization points, which are defined
        // implicitly as the places where we set deferred to Dead while it was previously not Dead.
        // To do this, we may need to build some Phi functions to handle stuff like this:
        //
        // Before:
        //
        //     if (p)
        //         PutStack(r42, @x)
        //     else
        //         PutStack(r42, @y)
        //
        // After:
        //
        //     if (p)
        //         Upsilon(@x, ^z)
        //     else
        //         Upsilon(@y, ^z)
        //     z: Phi()
        //     PutStack(r42, @z)
        //
        // This means that we have an SSACalculator::Variable for each local, and a Def is any
        // PutStack in the original program. The original PutStacks will simply vanish.
        
        // Build the bidirectional mapping between stack operands and SSACalculator
        // variables.
        Operands<SSACalculator::Variable*> operandToVariable(
            OperandsLike, m_graph.block(0)->variablesAtHead);
        Vector<VirtualRegister> indexToOperand;
        for (size_t i = m_graph.block(0)->variablesAtHead.size(); i--;) {
            VirtualRegister operand(m_graph.block(0)->variablesAtHead.operandForIndex(i));
            
            SSACalculator::Variable* variable = ssaCalculator.newVariable();
            operandToVariable.operand(operand) = variable;
            ASSERT(indexToOperand.size() == variable->index());
            indexToOperand.append(operand);
        }
        
        // The set of PutStacks that we will eliminate at the end of the phase.
        HashSet<Node*> putLocalsToSink;
        
        // Register defs: a PutStack defs the value it stores; a GetStack defs itself
        // (the loaded value becomes the authoritative value for the slot).
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case PutStack:
                    putLocalsToSink.add(node);
                    ssaCalculator.newDef(
                        operandToVariable.operand(node->stackAccessData()->local),
                        block, node->child1().node());
                    break;
                case GetStack:
                    ssaCalculator.newDef(
                        operandToVariable.operand(node->stackAccessData()->local),
                        block, node);
                    break;
                default:
                    break;
                }
            }
        }
        
        // Place Phis, pruned by liveness and by whether the block head has a concrete
        // deferral (no concrete deferral means the stack itself is authoritative).
        ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                VirtualRegister operand = indexToOperand[variable->index()];
                
                if (!liveAtHead[block].operand(operand))
                    return nullptr;
                
                FlushFormat format = deferredAtHead[block].operand(operand);
                
                // We could have an invalid deferral because liveness is imprecise.
                if (!isConcrete(format))
                    return nullptr;
                
                if (verbose)
                    dataLog("Adding Phi for ", operand, " at ", pointerDump(block), "\n");
                
                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(resultFor(format));
                return phiNode;
            });
        
        // Rewrite each block. "mapping" tracks, per operand, the node holding the value
        // that a deferred PutStack would have stored; "deferred" replays the forward
        // analysis so we know where to materialize PutStacks.
        Operands<Node*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead);
        Operands<FlushFormat> deferred;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            mapping.fill(nullptr);
            
            // Seed the mapping from the reaching defs at block head.
            for (size_t i = mapping.size(); i--;) {
                VirtualRegister operand(mapping.operandForIndex(i));
                
                SSACalculator::Variable* variable = operandToVariable.operand(operand);
                SSACalculator::Def* def = ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;
                
                mapping.operand(operand) = def->value();
            }
            
            if (verbose)
                dataLog("Mapping at top of ", pointerDump(block), ": ", mapping, "\n");
            
            // Insert this block's Phis at the top and let them override the mapping.
            for (SSACalculator::Def* phiDef : ssaCalculator.phisForBlock(block)) {
                VirtualRegister operand = indexToOperand[phiDef->variable()->index()];
                
                insertionSet.insert(0, phiDef->value());
                
                if (verbose)
                    dataLog(" Mapping ", operand, " to ", phiDef->value(), "\n");
                mapping.operand(operand) = phiDef->value();
            }
            
            deferred = deferredAtHead[block];
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                if (verbose)
                    dataLog("Deferred at ", node, ":", deferred, "\n");
                
                switch (node->op()) {
                case PutStack: {
                    // Don't execute the store; just remember the value and the format.
                    StackAccessData* data = node->stackAccessData();
                    VirtualRegister operand = data->local;
                    deferred.operand(operand) = data->format;
                    if (verbose)
                        dataLog(" Mapping ", operand, " to ", node->child1().node(), " at ", node, "\n");
                    mapping.operand(operand) = node->child1().node();
                    break;
                }
                    
                case GetStack: {
                    StackAccessData* data = node->stackAccessData();
                    FlushFormat format = deferred.operand(data->local);
                    if (!isConcrete(format)) {
                        // This means there is no deferral. No deferral means that the most
                        // authoritative value for this stack slot is what is stored in the stack. So,
                        // keep the GetStack.
                        mapping.operand(data->local) = node;
                        break;
                    }
                    
                    // We have a concrete deferral, which means a PutStack that hasn't executed yet. It
                    // would have stored a value with a certain format. That format must match our
                    // format. But more importantly, we can simply use the value that the PutStack would
                    // have stored and get rid of the GetStack.
                    DFG_ASSERT(m_graph, node, format == data->format);
                    
                    Node* incoming = mapping.operand(data->local);
                    node->child1() = incoming->defaultEdge();
                    node->convertToIdentity();
                    break;
                }
                    
                default: {
                    // Any escape is a materialization point: emit the deferred PutStack
                    // just before this node and clear the deferral.
                    auto escapeHandler = [&] (VirtualRegister operand) {
                        if (operand.isHeader())
                            return;
                        
                        FlushFormat format = deferred.operand(operand);
                        if (!isConcrete(format))
                            return;
                        
                        // Gotta insert a PutStack.
                        if (verbose)
                            dataLog("Inserting a PutStack for ", operand, " at ", node, "\n");
                        
                        Node* incoming = mapping.operand(operand);
                        DFG_ASSERT(m_graph, node, incoming);
                        
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, PutStack, node->origin,
                            OpInfo(m_graph.m_stackAccessData.add(operand, format)),
                            Edge(incoming, useKindFor(format)));
                        
                        deferred.operand(operand) = DeadFlush;
                    };
                    
                    preciseLocalClobberize(
                        m_graph, node, escapeHandler, escapeHandler,
                        [&] (VirtualRegister, LazyNode) { });
                    break;
                } }
            }
            
            // Feed each successor's Phis via Upsilons inserted before the terminal.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    VirtualRegister operand = indexToOperand[variable->index()];
                    if (verbose)
                        dataLog("Creating Upsilon for ", operand, " at ", pointerDump(block), "->", pointerDump(successorBlock), "\n");
                    FlushFormat format = deferredAtHead[successorBlock].operand(operand);
                    DFG_ASSERT(m_graph, nullptr, isConcrete(format));
                    UseKind useKind = useKindFor(format);
                    
                    // We need to get a value for the stack slot. This phase doesn't really have a
                    // good way of determining if a stack location got clobbered. It just knows if
                    // there is a deferral. The lack of a deferral might mean that a PutStack or
                    // GetStack had never happened, or it might mean that the value was read, or
                    // that it was written. It's OK for us to make some bad decisions here, since
                    // GCSE will clean it up anyway.
                    Node* incoming;
                    if (isConcrete(deferred.operand(operand))) {
                        incoming = mapping.operand(operand);
                        DFG_ASSERT(m_graph, phiNode, incoming);
                    } else {
                        // Issue a GetStack to get the value. This might introduce some redundancy
                        // into the code, but if it's bad enough, GCSE will clean it up.
                        incoming = insertionSet.insertNode(
                            upsilonInsertionPoint, SpecNone, GetStack, upsilonOrigin,
                            OpInfo(m_graph.m_stackAccessData.add(operand, format)));
                        incoming->setResult(resultFor(format));
                    }
                    
                    insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), Edge(incoming, useKind));
                }
            }
            
            insertionSet.execute(block);
        }
        
        // Finally eliminate the sunken PutStacks by turning them into Checks. This keeps whatever
        // type check they were doing. Also prepend KillStacks to them to ensure that we know that
        // the relevant value was *not* stored to the stack.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                
                if (!putLocalsToSink.contains(node))
                    continue;
                
                insertionSet.insertNode(
                    nodeIndex, SpecNone, KillStack, node->origin, OpInfo(node->stackAccessData()->local.offset()));
                node->remove();
            }
            
            insertionSet.execute(block);
        }
        
        if (verbose) {
            dataLog("Graph after PutStack sinking:\n");
            m_graph.dump();
        }
        
        return true;
    }
};
527 | ||
528 | } // anonymous namespace | |
529 | ||
// Entry point for the phase: runs PutStackSinkingPhase over the given graph under a
// sampling region, returning the phase's result.
bool performPutStackSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG PutStack Sinking Phase");
    return runPhase<PutStackSinkingPhase>(graph);
}
535 | ||
536 | } } // namespace JSC::DFG | |
537 | ||
538 | #endif // ENABLE(DFG_JIT) | |
539 |