]>
git.saurik.com Git - apple/javascriptcore.git/blob - dfg/DFGPutStackSinkingPhase.cpp
2 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGPutStackSinkingPhase.h"
31 #include "DFGBlockMapInlines.h"
33 #include "DFGInsertionSet.h"
35 #include "DFGPreciseLocalClobberize.h"
36 #include "DFGSSACalculator.h"
37 #include "DFGValidate.h"
38 #include "JSCInlines.h"
39 #include "OperandsInlines.h"
41 namespace JSC
{ namespace DFG
{
47 class PutStackSinkingPhase
: public Phase
{
49 PutStackSinkingPhase ( Graph
& graph
)
50 : Phase ( graph
, "PutStack sinking" )
56 // FIXME: One of the problems of this approach is that it will create a duplicate Phi graph
57 // for sunken PutStacks in the presence of interesting control flow merges, and where the
58 // value being PutStack'd is also otherwise live in the DFG code. We could work around this
59 // by doing the sinking over CPS, or maybe just by doing really smart hoisting. It's also
60 // possible that the duplicate Phi graph can be deduplicated by LLVM. It would be best if we
61 // could observe that there is already a Phi graph in place that does what we want. In
62 // principle if we have a request to place a Phi at a particular place, we could just check
63 // if there is already a Phi that does what we want. Because PutStackSinkingPhase runs just
64 // after SSA conversion, we have almost a guarantee that the Phi graph we produce here would
65 // be trivially redundant to the one we already have.
67 // FIXME: This phase doesn't adequately use KillStacks. KillStack can be viewed as a def.
68 // This is mostly inconsequential; it would be a bug to have a local live at a KillStack.
69 // More important is that KillStack should swallow any deferral. After a KillStack, the
70 // local should behave like a TOP deferral because it would be invalid for anyone to trust
71 // the stack. It's not clear to me if this is important or not.
72 // https://bugs.webkit.org/show_bug.cgi?id=145296
75 dataLog ( "Graph before PutStack sinking: \n " );
79 SSACalculator
ssaCalculator ( m_graph
);
80 InsertionSet
insertionSet ( m_graph
);
82 // First figure out where various locals are live.
83 BlockMap
< Operands
< bool >> liveAtHead ( m_graph
);
84 BlockMap
< Operands
< bool >> liveAtTail ( m_graph
);
86 for ( BasicBlock
* block
: m_graph
. blocksInNaturalOrder ()) {
87 liveAtHead
[ block
] = Operands
< bool >( OperandsLike
, block
-> variablesAtHead
);
88 liveAtTail
[ block
] = Operands
< bool >( OperandsLike
, block
-> variablesAtHead
);
90 liveAtHead
[ block
]. fill ( false );
91 liveAtTail
[ block
]. fill ( false );
98 for ( BlockIndex blockIndex
= m_graph
. numBlocks (); blockIndex
--;) {
99 BasicBlock
* block
= m_graph
. block ( blockIndex
);
103 Operands
< bool > live
= liveAtTail
[ block
];
104 for ( unsigned nodeIndex
= block
-> size (); nodeIndex
--;) {
105 Node
* node
= block
-> at ( nodeIndex
);
107 dataLog ( "Live at " , node
, ": " , live
, " \n " );
109 auto escapeHandler
= [&] ( VirtualRegister operand
) {
110 if ( operand
. isHeader ())
113 dataLog ( " " , operand
, " is live at " , node
, " \n " );
114 live
. operand ( operand
) = true ;
117 // FIXME: This might mishandle LoadVarargs and ForwardVarargs. It might make us
118 // think that the locals being written are stack-live here. They aren't. This
119 // should be harmless since we overwrite them anyway, but still, it's sloppy.
120 // https://bugs.webkit.org/show_bug.cgi?id=145295
121 preciseLocalClobberize (
122 m_graph
, node
, escapeHandler
, escapeHandler
,
123 [&] ( VirtualRegister operand
, LazyNode source
) {
124 RELEASE_ASSERT ( source
. isNode ());
126 if ( source
. asNode () == node
) {
127 // This is a load. Ignore it.
131 RELEASE_ASSERT ( node
-> op () == PutStack
);
132 live
. operand ( operand
) = false ;
136 if ( live
== liveAtHead
[ block
])
139 liveAtHead
[ block
] = live
;
142 for ( BasicBlock
* predecessor
: block
-> predecessors
) {
143 for ( size_t i
= live
. size (); i
--;)
144 liveAtTail
[ predecessor
][ i
] |= live
[ i
];
150 // All of the arguments should be live at head of root. Note that we may find that some
151 // locals are live at head of root. This seems wrong but isn't. This will happen for example
152 // if the function accesses closure variable #42 for some other function and we either don't
153 // have variable #42 at all or we haven't set it at root, for whatever reason. Basically this
154 // arises since our aliasing for closure variables is conservatively based on variable number
155 // and ignores the owning symbol table. We should probably fix this eventually and make our
156 // aliasing more precise.
158 // For our purposes here, the imprecision in the aliasing is harmless. It just means that we
159 // may not do as much Phi pruning as we wanted.
160 for ( size_t i
= liveAtHead
. atIndex ( 0 ). numberOfArguments (); i
--;)
161 DFG_ASSERT ( m_graph
, nullptr , liveAtHead
. atIndex ( 0 ). argument ( i
));
163 // Next identify where we would want to sink PutStacks to. We say that there is a deferred
164 // flush if we had a PutStack with a given FlushFormat but it hasn't been materialized yet.
165 // Deferrals have the following lattice; but it's worth noting that the TOP part of the
166 // lattice serves an entirely different purpose than the rest of the lattice: it just means
167 // that we're in a region of code where nobody should have been relying on the value. The
168 // rest of the lattice means that we either have a PutStack that is deferred (i.e. still
169 // needs to be executed) or there isn't one (because we've already executed it).
172 // Represented as DeadFlush.
173 // Means that all previous PutStacks have been executed so there is nothing deferred.
174 // During merging this is subordinate to the other kinds of deferrals, because it
175 // represents the fact that we've already executed all necessary PutStacks. This implies
176 // that there *had* been some PutStacks that we should have executed.
179 // Represented as ConflictingFlush.
180 // Represents the fact that we know, via forward flow, that there isn't any value in the
181 // given local that anyone should have been relying on. This comes into play at the
182 // prologue (because in SSA form at the prologue no local has any value) or when we merge
183 // deferrals for different formats. A lexical scope in which a local had some semantic
184 // meaning will by this point share the same format; if we had stores from different
185 // lexical scopes that got merged together then we may have a conflicting format. Hence
186 // a conflicting format proves that we're no longer in an area in which the variable was
187 // in scope. Note that this is all approximate and only precise enough to later answer
188 // questions pertinent to sinking. For example, this doesn't always detect when a local
189 // is no longer semantically relevant - we may well have a deferral from inside some
190 // inlined call survive outside of that inlined code, and this is generally OK. In the
191 // worst case it means that we might think that a deferral that is actually dead must
192 // still be executed. But we usually catch that with liveness. Liveness usually catches
193 // such cases, but that's not guaranteed since liveness is conservative.
195 // What Top does give us is that it detects situations where we both don't need to care about a
196 // deferral and there is no way that we could reason about it anyway. If we merged
197 // deferrals for different formats then we wouldn't know the format to use. So, we use
198 // Top in that case because that's also a case where we know that we can ignore the
201 // Deferral with a concrete format:
202 // Represented by format values other than DeadFlush or ConflictingFlush.
203 // Represents the fact that the original code would have done a PutStack but we haven't
204 // identified an operation that would have observed that PutStack.
206 // This code has some interesting quirks because of the fact that neither liveness nor
207 // deferrals are very precise. They are only precise enough to be able to correctly tell us
208 // when we may [sic] need to execute PutStacks. This means that they may report the need to
209 // execute a PutStack in cases where we actually don't really need it, and that's totally OK.
210 BlockMap
< Operands
< FlushFormat
>> deferredAtHead ( m_graph
);
211 BlockMap
< Operands
< FlushFormat
>> deferredAtTail ( m_graph
);
213 for ( BasicBlock
* block
: m_graph
. blocksInNaturalOrder ()) {
214 deferredAtHead
[ block
] =
215 Operands
< FlushFormat
>( OperandsLike
, block
-> variablesAtHead
);
216 deferredAtTail
[ block
] =
217 Operands
< FlushFormat
>( OperandsLike
, block
-> variablesAtHead
);
220 deferredAtHead
. atIndex ( 0 ). fill ( ConflictingFlush
);
225 for ( BasicBlock
* block
: m_graph
. blocksInNaturalOrder ()) {
226 Operands
< FlushFormat
> deferred
= deferredAtHead
[ block
];
228 for ( Node
* node
: * block
) {
230 dataLog ( "Deferred at " , node
, ":" , deferred
, " \n " );
232 if ( node
-> op () == GetStack
) {
233 // A GetStack doesn't affect anything, since we know which local we are reading
238 auto escapeHandler
= [&] ( VirtualRegister operand
) {
239 if ( operand
. isHeader ())
241 // We will materialize just before any reads.
242 deferred
. operand ( operand
) = DeadFlush
;
245 preciseLocalClobberize (
246 m_graph
, node
, escapeHandler
, escapeHandler
,
247 [&] ( VirtualRegister operand
, LazyNode source
) {
248 RELEASE_ASSERT ( source
. isNode ());
250 if ( source
. asNode () == node
) {
251 // This is a load. Ignore it.
255 deferred
. operand ( operand
) = node
-> stackAccessData ()-> format
;
259 if ( deferred
== deferredAtTail
[ block
])
262 deferredAtTail
[ block
] = deferred
;
265 for ( BasicBlock
* successor
: block
-> successors ()) {
266 for ( size_t i
= deferred
. size (); i
--;) {
268 dataLog ( "Considering " , VirtualRegister ( deferred
. operandForIndex ( i
)), " at " , pointerDump ( block
), "->" , pointerDump ( successor
), ": " , deferred
[ i
], " and " , deferredAtHead
[ successor
][ i
], " merges to " );
270 deferredAtHead
[ successor
][ i
] =
271 merge ( deferredAtHead
[ successor
][ i
], deferred
[ i
]);
274 dataLog ( deferredAtHead
[ successor
][ i
], " \n " );
281 // We wish to insert PutStacks at all of the materialization points, which are defined
282 // implicitly as the places where we set deferred to Dead while it was previously not Dead.
283 // To do this, we may need to build some Phi functions to handle stuff like this:
301 // This means that we have an SSACalculator::Variable for each local, and a Def is any
302 // PutStack in the original program. The original PutStacks will simply vanish.
304 Operands
< SSACalculator :: Variable
*> operandToVariable (
305 OperandsLike
, m_graph
. block ( 0 )-> variablesAtHead
);
306 Vector
< VirtualRegister
> indexToOperand
;
307 for ( size_t i
= m_graph
. block ( 0 )-> variablesAtHead
. size (); i
--;) {
308 VirtualRegister
operand ( m_graph
. block ( 0 )-> variablesAtHead
. operandForIndex ( i
));
310 SSACalculator :: Variable
* variable
= ssaCalculator
. newVariable ();
311 operandToVariable
. operand ( operand
) = variable
;
312 ASSERT ( indexToOperand
. size () == variable
-> index ());
313 indexToOperand
. append ( operand
);
316 HashSet
< Node
*> putLocalsToSink
;
318 for ( BasicBlock
* block
: m_graph
. blocksInNaturalOrder ()) {
319 for ( Node
* node
: * block
) {
320 switch ( node
-> op ()) {
322 putLocalsToSink
. add ( node
);
323 ssaCalculator
. newDef (
324 operandToVariable
. operand ( node
-> stackAccessData ()-> local
),
325 block
, node
-> child1 (). node ());
328 ssaCalculator
. newDef (
329 operandToVariable
. operand ( node
-> stackAccessData ()-> local
),
338 ssaCalculator
. computePhis (
339 [&] ( SSACalculator :: Variable
* variable
, BasicBlock
* block
) -> Node
* {
340 VirtualRegister operand
= indexToOperand
[ variable
-> index ()];
342 if (! liveAtHead
[ block
]. operand ( operand
))
345 FlushFormat format
= deferredAtHead
[ block
]. operand ( operand
);
347 // We could have an invalid deferral because liveness is imprecise.
348 if (! isConcrete ( format
))
352 dataLog ( "Adding Phi for " , operand
, " at " , pointerDump ( block
), " \n " );
354 Node
* phiNode
= m_graph
. addNode ( SpecHeapTop
, Phi
, NodeOrigin ());
355 phiNode
-> mergeFlags ( resultFor ( format
));
359 Operands
< Node
*> mapping ( OperandsLike
, m_graph
. block ( 0 )-> variablesAtHead
);
360 Operands
< FlushFormat
> deferred
;
361 for ( BasicBlock
* block
: m_graph
. blocksInNaturalOrder ()) {
362 mapping
. fill ( nullptr );
364 for ( size_t i
= mapping
. size (); i
--;) {
365 VirtualRegister
operand ( mapping
. operandForIndex ( i
));
367 SSACalculator :: Variable
* variable
= operandToVariable
. operand ( operand
);
368 SSACalculator :: Def
* def
= ssaCalculator
. reachingDefAtHead ( block
, variable
);
372 mapping
. operand ( operand
) = def
-> value ();
376 dataLog ( "Mapping at top of " , pointerDump ( block
), ": " , mapping
, " \n " );
378 for ( SSACalculator :: Def
* phiDef
: ssaCalculator
. phisForBlock ( block
)) {
379 VirtualRegister operand
= indexToOperand
[ phiDef
-> variable ()-> index ()];
381 insertionSet
. insert ( 0 , phiDef
-> value ());
384 dataLog ( " Mapping " , operand
, " to " , phiDef
-> value (), " \n " );
385 mapping
. operand ( operand
) = phiDef
-> value ();
388 deferred
= deferredAtHead
[ block
];
389 for ( unsigned nodeIndex
= 0 ; nodeIndex
< block
-> size (); ++ nodeIndex
) {
390 Node
* node
= block
-> at ( nodeIndex
);
392 dataLog ( "Deferred at " , node
, ":" , deferred
, " \n " );
394 switch ( node
-> op ()) {
396 StackAccessData
* data
= node
-> stackAccessData ();
397 VirtualRegister operand
= data
-> local
;
398 deferred
. operand ( operand
) = data
-> format
;
400 dataLog ( " Mapping " , operand
, " to " , node
-> child1 (). node (), " at " , node
, " \n " );
401 mapping
. operand ( operand
) = node
-> child1 (). node ();
406 StackAccessData
* data
= node
-> stackAccessData ();
407 FlushFormat format
= deferred
. operand ( data
-> local
);
408 if (! isConcrete ( format
)) {
409 // This means there is no deferral. No deferral means that the most
410 // authoritative value for this stack slot is what is stored in the stack. So,
411 // keep the GetStack.
412 mapping
. operand ( data
-> local
) = node
;
416 // We have a concrete deferral, which means a PutStack that hasn't executed yet. It
417 // would have stored a value with a certain format. That format must match our
418 // format. But more importantly, we can simply use the value that the PutStack would
419 // have stored and get rid of the GetStack.
420 DFG_ASSERT ( m_graph
, node
, format
== data
-> format
);
422 Node
* incoming
= mapping
. operand ( data
-> local
);
423 node
-> child1 () = incoming
-> defaultEdge ();
424 node
-> convertToIdentity ();
429 auto escapeHandler
= [&] ( VirtualRegister operand
) {
430 if ( operand
. isHeader ())
433 FlushFormat format
= deferred
. operand ( operand
);
434 if (! isConcrete ( format
))
437 // Gotta insert a PutStack.
439 dataLog ( "Inserting a PutStack for " , operand
, " at " , node
, " \n " );
441 Node
* incoming
= mapping
. operand ( operand
);
442 DFG_ASSERT ( m_graph
, node
, incoming
);
444 insertionSet
. insertNode (
445 nodeIndex
, SpecNone
, PutStack
, node
-> origin
,
446 OpInfo ( m_graph
. m_stackAccessData
. add ( operand
, format
)),
447 Edge ( incoming
, useKindFor ( format
)));
449 deferred
. operand ( operand
) = DeadFlush
;
452 preciseLocalClobberize (
453 m_graph
, node
, escapeHandler
, escapeHandler
,
454 [&] ( VirtualRegister
, LazyNode
) { });
459 NodeAndIndex terminal
= block
-> findTerminal ();
460 size_t upsilonInsertionPoint
= terminal
. index
;
461 NodeOrigin upsilonOrigin
= terminal
. node
-> origin
;
462 for ( BasicBlock
* successorBlock
: block
-> successors ()) {
463 for ( SSACalculator :: Def
* phiDef
: ssaCalculator
. phisForBlock ( successorBlock
)) {
464 Node
* phiNode
= phiDef
-> value ();
465 SSACalculator :: Variable
* variable
= phiDef
-> variable ();
466 VirtualRegister operand
= indexToOperand
[ variable
-> index ()];
468 dataLog ( "Creating Upsilon for " , operand
, " at " , pointerDump ( block
), "->" , pointerDump ( successorBlock
), " \n " );
469 FlushFormat format
= deferredAtHead
[ successorBlock
]. operand ( operand
);
470 DFG_ASSERT ( m_graph
, nullptr , isConcrete ( format
));
471 UseKind useKind
= useKindFor ( format
);
473 // We need to get a value for the stack slot. This phase doesn't really have a
474 // good way of determining if a stack location got clobbered. It just knows if
475 // there is a deferral. The lack of a deferral might mean that a PutStack or
476 // GetStack had never happened, or it might mean that the value was read, or
477 // that it was written. It's OK for us to make some bad decisions here, since
478 // GCSE will clean it up anyway.
480 if ( isConcrete ( deferred
. operand ( operand
))) {
481 incoming
= mapping
. operand ( operand
);
482 DFG_ASSERT ( m_graph
, phiNode
, incoming
);
484 // Issue a GetStack to get the value. This might introduce some redundancy
485 // into the code, but if it's bad enough, GCSE will clean it up.
486 incoming
= insertionSet
. insertNode (
487 upsilonInsertionPoint
, SpecNone
, GetStack
, upsilonOrigin
,
488 OpInfo ( m_graph
. m_stackAccessData
. add ( operand
, format
)));
489 incoming
-> setResult ( resultFor ( format
));
492 insertionSet
. insertNode (
493 upsilonInsertionPoint
, SpecNone
, Upsilon
, upsilonOrigin
,
494 OpInfo ( phiNode
), Edge ( incoming
, useKind
));
498 insertionSet
. execute ( block
);
501 // Finally eliminate the sunken PutStacks by turning them into Checks. This keeps whatever
502 // type check they were doing. Also prepend KillStacks to them to ensure that we know that
503 // the relevant value was *not* stored to the stack.
504 for ( BasicBlock
* block
: m_graph
. blocksInNaturalOrder ()) {
505 for ( unsigned nodeIndex
= 0 ; nodeIndex
< block
-> size (); ++ nodeIndex
) {
506 Node
* node
= block
-> at ( nodeIndex
);
508 if (! putLocalsToSink
. contains ( node
))
511 insertionSet
. insertNode (
512 nodeIndex
, SpecNone
, KillStack
, node
-> origin
, OpInfo ( node
-> stackAccessData ()-> local
. offset ()));
516 insertionSet
. execute ( block
);
520 dataLog ( "Graph after PutStack sinking: \n " );
528 } // anonymous namespace
530 bool performPutStackSinking ( Graph
& graph
)
532 SamplingRegion
samplingRegion ( "DFG PutStack Sinking Phase" );
533 return runPhase
< PutStackSinkingPhase
>( graph
);
536 } } // namespace JSC::DFG
538 #endif // ENABLE(DFG_JIT)