/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGBasicBlock_h
#define DFGBasicBlock_h

#if ENABLE(DFG_JIT)

#include "DFGAbstractValue.h"
#include "DFGAvailability.h"
#include "DFGAvailabilityMap.h"
#include "DFGBranchDirection.h"
#include "DFGFlushedAt.h"
#include "DFGNode.h"
#include "DFGNodeOrigin.h"
#include "DFGStructureClobberState.h"
#include "Operands.h"
#include <wtf/HashMap.h>
#include <wtf/HashSet.h>
#include <wtf/Vector.h>

namespace JSC { namespace DFG {

class Graph;
class InsertionSet;

typedef Vector<BasicBlock*, 2> PredecessorList;
typedef Vector<Node*, 8> BlockNodeList;

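// A basic block of DFG IR: a straight-line list of nodes that is entered at its head and exited
// through its terminal (Jump, Branch, Switch, Return, ...), together with the per-block state
// that the compiler's analyses (CFA, liveness, OSR availability) attach to it.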
struct BasicBlock : RefCounted<BasicBlock> {
    BasicBlock(
        unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals,
        float executionCount);
    ~BasicBlock();

    void ensureLocals(unsigned newNumLocals);

    size_t size() const { return m_nodes.size(); }
    bool isEmpty() const { return !size(); }
    Node*& at(size_t i) { return m_nodes[i]; }
    Node* at(size_t i) const { return m_nodes[i]; }
    Node* tryAt(size_t i) const
    {
        if (i >= size())
            return nullptr;
        return at(i);
    }
    Node*& operator[](size_t i) { return at(i); }
    Node* operator[](size_t i) const { return at(i); }

    // Use this to find both the index of the terminal and the terminal itself in one go. May
    // return a clear NodeAndIndex if the basic block currently lacks a terminal. That may happen
    // in the middle of IR transformations within a phase but should never be the case in between
    // phases.
    //
    // The reason why this is more than just "at(size() - 1)" is that we may place non-terminal
    // liveness marking instructions after the terminal. This is supposed to happen infrequently
    // but some basic blocks - most notably return blocks - will have liveness markers for all of
    // the flushed variables right after the return.
    //
    // It turns out that doing this linear search is basically perf-neutral, so long as we force
    // the method to be inlined. Hence the ALWAYS_INLINE.
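    //
    // For example, the tail of a return block may look like
    //
    //     ... Return(@result), PhantomLocal(loc3), PhantomLocal(loc4)
    //
    // and findTerminal() has to walk backwards over those no-op liveness markers to reach the
    // Return. (The node names above are purely illustrative.)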
    ALWAYS_INLINE NodeAndIndex findTerminal() const
    {
        size_t i = size();
        while (i--) {
            Node* node = at(i);
            switch (node->op()) {
            case Jump:
            case Branch:
            case Switch:
            case Return:
            case Unreachable:
                return NodeAndIndex(node, i);
            // The bitter end can contain Phantoms and the like. There will probably only be one
            // or two nodes after the terminal. They are all no-ops and will not have any checked
            // children.
            case Check: // This is here because it's our universal no-op.
            case Phantom:
            case PhantomLocal:
            case Flush:
                break;
            default:
                return NodeAndIndex();
            }
        }
        return NodeAndIndex();
    }

    ALWAYS_INLINE Node* terminal() const
    {
        return findTerminal().node;
    }

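    // A typical use in a phase, sketched here for illustration only (Graph::numBlocks() and
    // Graph::block() come from DFGGraph.h):
    //
    //     for (BlockIndex blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) {
    //         BasicBlock* block = graph.block(blockIndex);
    //         if (!block)
    //             continue;
    //         Node* terminal = block->terminal();
    //         if (terminal->op() == Branch) {
    //             // ... rewrite the branch ...
    //         }
    //     }
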
    void resize(size_t size) { m_nodes.resize(size); }
    void grow(size_t size) { m_nodes.grow(size); }

    void append(Node* node) { m_nodes.append(node); }
    void insertBeforeTerminal(Node* node)
    {
        NodeAndIndex result = findTerminal();
        if (!result)
            append(node);
        else
            m_nodes.insert(result.index, node);
    }

    void replaceTerminal(Node*);

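    // The block viewed as the concatenation of its Phi nodes followed by its body: indices in
    // [0, phis.size()) refer to Phis, and larger indices refer to m_nodes.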
    size_t numNodes() const { return phis.size() + size(); }
    Node* node(size_t i) const
    {
        if (i < phis.size())
            return phis[i];
        return at(i - phis.size());
    }
    bool isPhiIndex(size_t i) const { return i < phis.size(); }

    bool isInPhis(Node* node) const;
    bool isInBlock(Node* myNode) const;

    BlockNodeList::iterator begin() { return m_nodes.begin(); }
    BlockNodeList::iterator end() { return m_nodes.end(); }

    Node* firstOriginNode();
    NodeOrigin firstOrigin();

    unsigned numSuccessors() { return terminal()->numSuccessors(); }

    BasicBlock*& successor(unsigned index)
    {
        return terminal()->successor(index);
    }

    BasicBlock*& successorForCondition(bool condition)
    {
        return terminal()->successorForCondition(condition);
    }

    Node::SuccessorsIterable successors()
    {
        return terminal()->successors();
    }

    void removePredecessor(BasicBlock* block);
    void replacePredecessor(BasicBlock* from, BasicBlock* to);

    template<typename... Params>
    Node* appendNode(Graph&, SpeculatedType, Params...);

    template<typename... Params>
    Node* appendNonTerminal(Graph&, SpeculatedType, Params...);

    template<typename... Params>
    Node* replaceTerminal(Graph&, SpeculatedType, Params...);

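    // Hypothetical example of how a phase might call one of these, for illustration only; the
    // exact node type and arguments depend on the phase, and "origin" is assumed to be a
    // NodeOrigin the caller already has:
    //
    //     block->appendNode(m_graph, SpecNone, Check, origin);
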
    void dump(PrintStream& out) const;

    // This value is used internally for block linking and OSR entry. It is mostly meaningless
    // for other purposes due to inlining.
    unsigned bytecodeBegin;

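    // State tracked by the CFA (the DFG's flow-sensitive abstract interpreter): whether the block
    // must be revisited before the analysis reaches a fixpoint, whether it discovered constants,
    // and what is known about structure clobbering and the taken branch direction at this block.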
    bool cfaShouldRevisit;
    bool cfaFoundConstants;
    StructureClobberState cfaStructureClobberStateAtHead;
    StructureClobberState cfaStructureClobberStateAtTail;
    BranchDirection cfaBranchDirection;

    Vector<Node*> phis;
    PredecessorList predecessors;

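    // Per-operand (argument and local) state while the graph is in CPS form: the node associated
    // with each variable at the head and tail of the block, and the abstract value the CFA has
    // proved for it at those points.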
    Operands<Node*, NodePointerTraits> variablesAtHead;
    Operands<Node*, NodePointerTraits> variablesAtTail;

    Operands<AbstractValue> valuesAtHead;
    Operands<AbstractValue> valuesAtTail;

    // The intersection of assumptions we have made previously at the head of this block. Note
    // that under normal circumstances, each time we run the CFA, we will get strictly more precise
    // results. But we don't actually require this to be the case. It's fine for the CFA to loosen
    // up for any odd reason. It's fine when this happens, because anything that the CFA proves
    // must be true from that point forward, except if some registered watchpoint fires, in which
    // case the code won't ever run. So, the CFA proving something less precise later on is just an
    // outcome of the CFA being imperfect; the more precise thing that it had proved earlier is no
    // less true.
    //
    // But for the purpose of OSR entry, we need to make sure that we remember what assumptions we
    // had used for optimizing any given basic block. That's what this is for.
    //
    // It's interesting that we could use this to make the CFA more precise: all future CFAs could
    // filter their results with this thing to sort of maintain maximal precision. Because we
    // expect CFA to usually be monotonically more precise each time we run it to fixpoint, this
    // would not be a productive optimization: it would make setting up a basic block more
    // expensive and would only benefit bizarre pathological cases.
    Operands<AbstractValue> intersectionOfPastValuesAtHead;
    bool intersectionOfCFAHasVisited;

    float executionCount;

    // These fields are reserved for NaturalLoops.
    static const unsigned numberOfInnerMostLoopIndices = 2;
    unsigned innerMostLoopIndices[numberOfInnerMostLoopIndices];

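    // Extra per-block state that only exists while the graph is in SSA form; ssa is null
    // otherwise.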
    struct SSAData {
        AvailabilityMap availabilityAtHead;
        AvailabilityMap availabilityAtTail;

        HashSet<Node*> liveAtHead;
        HashSet<Node*> liveAtTail;
        HashMap<Node*, AbstractValue> valuesAtHead;
        HashMap<Node*, AbstractValue> valuesAtTail;

        SSAData(BasicBlock*);
        ~SSAData();
    };
    std::unique_ptr<SSAData> ssa;

private:
    friend class InsertionSet;
    BlockNodeList m_nodes;
};

typedef Vector<BasicBlock*, 5> BlockList;

struct UnlinkedBlock {
    BasicBlock* m_block;
    bool m_needsNormalLinking;
    bool m_needsEarlyReturnLinking;

    UnlinkedBlock() { }

    explicit UnlinkedBlock(BasicBlock* block)
        : m_block(block)
        , m_needsNormalLinking(true)
        , m_needsEarlyReturnLinking(false)
    {
    }
};

static inline unsigned getBytecodeBeginForBlock(BasicBlock** basicBlock)
{
    return (*basicBlock)->bytecodeBegin;
}

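// Finds the block that begins at the given bytecode offset. linkingTargets must be sorted by
// bytecodeBegin, since this does a binary search keyed on getBytecodeBeginForBlock.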
static inline BasicBlock* blockForBytecodeOffset(Vector<BasicBlock*>& linkingTargets, unsigned bytecodeBegin)
{
    return *binarySearch<BasicBlock*, unsigned>(linkingTargets, linkingTargets.size(), bytecodeBegin, getBytecodeBeginForBlock);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)

#endif // DFGBasicBlock_h