]> git.saurik.com Git - apple/javascriptcore.git/blob - dfg/DFGBasicBlock.h
420ff511c8b6e2250e3779de8639d3cd70a0822d
[apple/javascriptcore.git] / dfg / DFGBasicBlock.h
1 /*
2 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef DFGBasicBlock_h
27 #define DFGBasicBlock_h
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGAbstractValue.h"
32 #include "DFGAvailability.h"
33 #include "DFGAvailabilityMap.h"
34 #include "DFGBranchDirection.h"
35 #include "DFGFlushedAt.h"
36 #include "DFGNode.h"
37 #include "DFGNodeOrigin.h"
38 #include "DFGStructureClobberState.h"
39 #include "Operands.h"
40 #include <wtf/HashMap.h>
41 #include <wtf/HashSet.h>
42 #include <wtf/Vector.h>
43
44 namespace JSC { namespace DFG {
45
46 class Graph;
47 class InsertionSet;
48
49 typedef Vector<BasicBlock*, 2> PredecessorList;
50 typedef Vector<Node*, 8> BlockNodeList;
51
// A basic block in the DFG IR: a RefCounted, ordered list of Node*s plus the
// per-block metadata used by block linking, CFA, OSR entry, loop analysis,
// and (optionally) SSA form.
struct BasicBlock : RefCounted<BasicBlock> {
    BasicBlock(
        unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals,
        float executionCount);
    ~BasicBlock();

    // Grows the per-local operand tables to cover newNumLocals — defined
    // out-of-line; presumably resizes variablesAtHead/Tail and valuesAtHead/Tail.
    void ensureLocals(unsigned newNumLocals);

    // Number of body (non-phi) nodes. Phis are kept separately in 'phis'.
    size_t size() const { return m_nodes.size(); }
    bool isEmpty() const { return !size(); }
    Node*& at(size_t i) { return m_nodes[i]; }
    Node* at(size_t i) const { return m_nodes[i]; }
    // Bounds-checked variant of at(): returns null for out-of-range indices.
    Node* tryAt(size_t i) const
    {
        if (i >= size())
            return nullptr;
        return at(i);
    }
    Node*& operator[](size_t i) { return at(i); }
    Node* operator[](size_t i) const { return at(i); }

    // Use this to find both the index of the terminal and the terminal itself in one go. May
    // return a clear NodeAndIndex if the basic block currently lacks a terminal. That may happen
    // in the middle of IR transformations within a phase but should never be the case in between
    // phases.
    //
    // The reason why this is more than just "at(size() - 1)" is that we may place non-terminal
    // liveness marking instructions after the terminal. This is supposed to happen infrequently
    // but some basic blocks - most notably return blocks - will have liveness markers for all of
    // the flushed variables right after the return.
    //
    // It turns out that doing this linear search is basically perf-neutral, so long as we force
    // the method to be inlined. Hence the ALWAYS_INLINE.
    ALWAYS_INLINE NodeAndIndex findTerminal() const
    {
        // Scan backwards: the terminal is the last control-flow node, but a
        // short tail of no-op liveness markers may follow it.
        size_t i = size();
        while (i--) {
            Node* node = at(i);
            switch (node->op()) {
            case Jump:
            case Branch:
            case Switch:
            case Return:
            case Unreachable:
                return NodeAndIndex(node, i);
            // The bitter end can contain Phantoms and the like. There will probably only be one
            // or two nodes after the terminal. They are all no-ops and will not have any checked
            // children.
            case Check: // This is here because it's our universal no-op.
            case Phantom:
            case PhantomLocal:
            case Flush:
                break;
            default:
                // Anything else before finding a terminal means the block has
                // no terminal right now (mid-phase state).
                return NodeAndIndex();
            }
        }
        return NodeAndIndex();
    }

    // Convenience wrapper around findTerminal() when the index is not needed.
    // Returns null if the block currently lacks a terminal.
    ALWAYS_INLINE Node* terminal() const
    {
        return findTerminal().node;
    }

    void resize(size_t size) { m_nodes.resize(size); }
    void grow(size_t size) { m_nodes.grow(size); }

    void append(Node* node) { m_nodes.append(node); }
    // Inserts node immediately before the terminal. If there is no terminal
    // (legal mid-phase), the node is simply appended.
    void insertBeforeTerminal(Node* node)
    {
        NodeAndIndex result = findTerminal();
        if (!result)
            append(node);
        else
            m_nodes.insert(result.index, node);
    }

    void replaceTerminal(Node*);

    // Total node count including phis. node(i) indexes phis first, then the
    // body, so valid i ranges over [0, numNodes()).
    size_t numNodes() const { return phis.size() + size(); }
    Node* node(size_t i) const
    {
        if (i < phis.size())
            return phis[i];
        return at(i - phis.size());
    }
    // True if node(i) would come from 'phis' rather than the body.
    bool isPhiIndex(size_t i) const { return i < phis.size(); }

    bool isInPhis(Node* node) const;
    bool isInBlock(Node* myNode) const;

    // Iteration covers only the body nodes (m_nodes), not phis.
    BlockNodeList::iterator begin() { return m_nodes.begin(); }
    BlockNodeList::iterator end() { return m_nodes.end(); }

    Node* firstOriginNode();
    NodeOrigin firstOrigin();

    // Successor queries delegate to the terminal node; they are only valid
    // while the block has a terminal.
    unsigned numSuccessors() { return terminal()->numSuccessors(); }

    BasicBlock*& successor(unsigned index)
    {
        return terminal()->successor(index);
    }
    BasicBlock*& successorForCondition(bool condition)
    {
        return terminal()->successorForCondition(condition);
    }

    Node::SuccessorsIterable successors()
    {
        return terminal()->successors();
    }

    void removePredecessor(BasicBlock* block);
    void replacePredecessor(BasicBlock* from, BasicBlock* to);

    // Allocates a node in the Graph and appends it to this block.
    template<typename... Params>
    Node* appendNode(Graph&, SpeculatedType, Params...);

    // Like appendNode(), but inserts the new node before the terminal.
    template<typename... Params>
    Node* appendNonTerminal(Graph&, SpeculatedType, Params...);

    // Allocates a node in the Graph and installs it as this block's terminal.
    template<typename... Params>
    Node* replaceTerminal(Graph&, SpeculatedType, Params...);

    void dump(PrintStream& out) const;

    // Records that block linking has processed this block. Debug-only
    // bookkeeping: the flag exists solely for assertions.
    void didLink()
    {
#if !ASSERT_DISABLED
        isLinked = true;
#endif
    }

    // This value is used internally for block linking and OSR entry. It is mostly meaningless
    // for other purposes due to inlining.
    unsigned bytecodeBegin;

    BlockIndex index;

    bool isOSRTarget;
    // State flags maintained by the control flow analysis (CFA) phase.
    bool cfaHasVisited;
    bool cfaShouldRevisit;
    bool cfaFoundConstants;
    bool cfaDidFinish;
    StructureClobberState cfaStructureClobberStateAtHead;
    StructureClobberState cfaStructureClobberStateAtTail;
    BranchDirection cfaBranchDirection;
#if !ASSERT_DISABLED
    bool isLinked; // Set by didLink(); only present in assert-enabled builds.
#endif
    bool isReachable;

    Vector<Node*> phis;
    PredecessorList predecessors;

    // Per-operand node mappings at block entry and exit.
    Operands<Node*, NodePointerTraits> variablesAtHead;
    Operands<Node*, NodePointerTraits> variablesAtTail;

    // Abstract values proven by the CFA at block entry and exit.
    Operands<AbstractValue> valuesAtHead;
    Operands<AbstractValue> valuesAtTail;

    // The intersection of assumptions we have made previously at the head of this block. Note
    // that under normal circumstances, each time we run the CFA, we will get strictly more precise
    // results. But we don't actually require this to be the case. It's fine for the CFA to loosen
    // up for any odd reason. It's fine when this happens, because anything that the CFA proves
    // must be true from that point forward, except if some registered watchpoint fires, in which
    // case the code won't ever run. So, the CFA proving something less precise later on is just an
    // outcome of the CFA being imperfect; the more precise thing that it had proved earlier is no
    // less true.
    //
    // But for the purpose of OSR entry, we need to make sure that we remember what assumptions we
    // had used for optimizing any given basic block. That's what this is for.
    //
    // It's interesting that we could use this to make the CFA more precise: all future CFAs could
    // filter their results with this thing to sort of maintain maximal precision. Because we
    // expect CFA to usually be monotonically more precise each time we run it to fixpoint, this
    // would not be a productive optimization: it would make setting up a basic block more
    // expensive and would only benefit bizarre pathological cases.
    Operands<AbstractValue> intersectionOfPastValuesAtHead;
    bool intersectionOfCFAHasVisited;

    float executionCount;

    // These fields are reserved for NaturalLoops.
    static const unsigned numberOfInnerMostLoopIndices = 2;
    unsigned innerMostLoopIndices[numberOfInnerMostLoopIndices];

    // Extra per-block state used for SSA form: availability, liveness, and
    // per-node abstract values at block boundaries.
    struct SSAData {
        AvailabilityMap availabilityAtHead;
        AvailabilityMap availabilityAtTail;

        HashSet<Node*> liveAtHead;
        HashSet<Node*> liveAtTail;
        HashMap<Node*, AbstractValue> valuesAtHead;
        HashMap<Node*, AbstractValue> valuesAtTail;

        SSAData(BasicBlock*);
        ~SSAData();
    };
    // NOTE(review): presumably null unless the graph is in SSA form — confirm
    // against the phase that allocates it.
    std::unique_ptr<SSAData> ssa;

private:
    friend class InsertionSet;
    BlockNodeList m_nodes;
};
257
258 typedef Vector<BasicBlock*, 5> BlockList;
259
260 struct UnlinkedBlock {
261 BasicBlock* m_block;
262 bool m_needsNormalLinking;
263 bool m_needsEarlyReturnLinking;
264
265 UnlinkedBlock() { }
266
267 explicit UnlinkedBlock(BasicBlock* block)
268 : m_block(block)
269 , m_needsNormalLinking(true)
270 , m_needsEarlyReturnLinking(false)
271 {
272 }
273 };
274
275 static inline unsigned getBytecodeBeginForBlock(BasicBlock** basicBlock)
276 {
277 return (*basicBlock)->bytecodeBegin;
278 }
279
280 static inline BasicBlock* blockForBytecodeOffset(Vector<BasicBlock*>& linkingTargets, unsigned bytecodeBegin)
281 {
282 return *binarySearch<BasicBlock*, unsigned>(linkingTargets, linkingTargets.size(), bytecodeBegin, getBytecodeBeginForBlock);
283 }
284
285 } } // namespace JSC::DFG
286
287 #endif // ENABLE(DFG_JIT)
288
289 #endif // DFGBasicBlock_h
290