// heap/MarkedSpace.cpp (JavaScriptCore-7601.1.46.3)
/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "IncrementalSweeper.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSObject.h"
#include "JSCInlines.h"

namespace JSC {

class Structure;

class Free {
public:
    typedef MarkedBlock* ReturnType;

    enum FreeMode { FreeOrShrink, FreeAll };

    Free(FreeMode, MarkedSpace*);
    void operator()(MarkedBlock*);
    ReturnType returnValue();

private:
    FreeMode m_freeMode;
    MarkedSpace* m_markedSpace;
    DoublyLinkedList<MarkedBlock> m_blocks;
};

inline Free::Free(FreeMode freeMode, MarkedSpace* newSpace)
    : m_freeMode(freeMode)
    , m_markedSpace(newSpace)
{
}

inline void Free::operator()(MarkedBlock* block)
{
    if (m_freeMode == FreeOrShrink)
        m_markedSpace->freeOrShrinkBlock(block);
    else
        m_markedSpace->freeBlock(block);
}

inline Free::ReturnType Free::returnValue()
{
    return m_blocks.head();
}
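
// Note on the pattern above: forEachBlock() (a member template declared in
// MarkedSpace.h) applies a functor to every MarkedBlock and then returns
// functor.returnValue(), which is why Free declares ReturnType/returnValue().
// A minimal usage sketch, mirroring ~MarkedSpace() and shrink() below; Free is
// the functor defined above and nothing here is new API:
//
//     Free free(Free::FreeAll, this); // unconditionally free every block
//     forEachBlock(free);             // visit each MarkedBlock in m_blocks
//
// With Free::FreeOrShrink, empty blocks are freed and non-empty blocks are
// merely shrunk (see freeOrShrinkBlock() further down).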

struct VisitWeakSet : MarkedBlock::VoidFunctor {
    VisitWeakSet(HeapRootVisitor& heapRootVisitor) : m_heapRootVisitor(heapRootVisitor) { }
    void operator()(MarkedBlock* block) { block->visitWeakSet(m_heapRootVisitor); }
private:
    HeapRootVisitor& m_heapRootVisitor;
};

struct ReapWeakSet : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->reapWeakSet(); }
};
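
// VisitWeakSet and ReapWeakSet derive from MarkedBlock::VoidFunctor, which, as
// used in this file, simply supplies the trivial ReturnType/returnValue() that
// forEachBlock() expects from functors that do not accumulate a result.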

MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
    , m_capacity(0)
    , m_isIterating(false)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        allocatorFor(cellSize).init(heap, this, cellSize, false);
        destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
    }

    for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        allocatorFor(cellSize).init(heap, this, cellSize, false);
        destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
    }

    m_normalSpace.largeAllocator.init(heap, this, 0, false);
    m_destructorSpace.largeAllocator.init(heap, this, 0, true);
}
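
// Cells are binned into size classes: finely stepped "precise" classes for
// small cells, coarsely stepped "imprecise" classes above preciseCutoff, and a
// single large allocator (initialized with cell size 0, presumably meaning
// per-allocation sizing) for anything bigger. The selection itself lives in
// MarkedSpace.h; a sketch of what allocatorFor() presumably does, with the
// index math being illustrative rather than copied from the header:
//
//     MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
//     {
//         if (bytes <= preciseCutoff)
//             return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep];
//         if (bytes <= impreciseCutoff)
//             return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
//         return m_normalSpace.largeAllocator;
//     }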

MarkedSpace::~MarkedSpace()
{
    Free free(Free::FreeAll, this);
    forEachBlock(free);
    ASSERT(!m_blocks.set().size());
}

struct LastChanceToFinalize {
    void operator()(MarkedAllocator& allocator) { allocator.lastChanceToFinalize(); }
};

void MarkedSpace::lastChanceToFinalize()
{
    stopAllocating();
    forEachAllocator<LastChanceToFinalize>();
}

void MarkedSpace::sweep()
{
    m_heap->sweeper()->willFinishSweeping();
    forEachBlock<Sweep>();
}

void MarkedSpace::zombifySweep()
{
    if (Options::logGC())
        dataLog("Zombifying sweep...");
    m_heap->sweeper()->willFinishSweeping();
    forEachBlock<ZombifySweep>();
}
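
// Both sweeps notify the IncrementalSweeper first. As read here,
// willFinishSweeping() is the hook that lets the timer-driven incremental
// sweeper stand down before the whole heap is swept synchronously, so blocks
// are not swept again by the timer afterwards.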

void MarkedSpace::resetAllocators()
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        allocatorFor(cellSize).reset();
        destructorAllocatorFor(cellSize).reset();
    }

    for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        allocatorFor(cellSize).reset();
        destructorAllocatorFor(cellSize).reset();
    }

    m_normalSpace.largeAllocator.reset();
    m_destructorSpace.largeAllocator.reset();

#if ENABLE(GGC)
    m_blocksWithNewObjects.clear();
#endif
}

void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
{
    VisitWeakSet visitWeakSet(heapRootVisitor);
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            visitWeakSet(m_blocksWithNewObjects[i]);
    } else
        forEachBlock(visitWeakSet);
}

void MarkedSpace::reapWeakSets()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            m_blocksWithNewObjects[i]->reapWeakSet();
    } else
        forEachBlock<ReapWeakSet>();
}
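
// During an EdenCollection only the blocks recorded in m_blocksWithNewObjects
// are visited or reaped, on the generational assumption that only blocks which
// took new allocations since the last collection can have weak-set work to do;
// a FullCollection walks every block instead.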

template <typename Functor>
void MarkedSpace::forEachAllocator()
{
    Functor functor;
    forEachAllocator(functor);
}

template <typename Functor>
void MarkedSpace::forEachAllocator(Functor& functor)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        functor(allocatorFor(cellSize));
        functor(destructorAllocatorFor(cellSize));
    }

    for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        functor(allocatorFor(cellSize));
        functor(destructorAllocatorFor(cellSize));
    }

    functor(m_normalSpace.largeAllocator);
    functor(m_destructorSpace.largeAllocator);
}
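
// The nullary overload just default-constructs the functor and forwards to the
// reference overload; the allocator functors in this file (LastChanceToFinalize
// above, StopAllocatingFunctor and ResumeAllocatingFunctor below) are all
// applied this way, e.g. forEachAllocator<StopAllocatingFunctor>().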

struct StopAllocatingFunctor {
    void operator()(MarkedAllocator& allocator) { allocator.stopAllocating(); }
};

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachAllocator<StopAllocatingFunctor>();
}

struct ResumeAllocatingFunctor {
    void operator()(MarkedAllocator& allocator) { allocator.resumeAllocating(); }
};

void MarkedSpace::resumeAllocating()
{
    ASSERT(isIterating());
    forEachAllocator<ResumeAllocatingFunctor>();
}

bool MarkedSpace::isPagedOut(double deadline)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        if (allocatorFor(cellSize).isPagedOut(deadline)
            || destructorAllocatorFor(cellSize).isPagedOut(deadline))
            return true;
    }

    for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        if (allocatorFor(cellSize).isPagedOut(deadline)
            || destructorAllocatorFor(cellSize).isPagedOut(deadline))
            return true;
    }

    if (m_normalSpace.largeAllocator.isPagedOut(deadline)
        || m_destructorSpace.largeAllocator.isPagedOut(deadline))
        return true;

    return false;
}
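
// A hedged reading: each allocator probes the residency of its blocks until the
// deadline passes, and the caller (the Heap) presumably uses the answer to skip
// work, such as an eager sweep, when much of the marked space appears to be
// paged out. The per-block check itself lives in MarkedAllocator/MarkedBlock.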

void MarkedSpace::freeBlock(MarkedBlock* block)
{
    block->allocator()->removeBlock(block);
    m_capacity -= block->capacity();
    m_blocks.remove(block);
    MarkedBlock::destroy(block);
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

struct Shrink : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->shrink(); }
};

void MarkedSpace::shrink()
{
    Free freeOrShrink(Free::FreeOrShrink, this);
    forEachBlock(freeOrShrink);
}

static void clearNewlyAllocatedInBlock(MarkedBlock* block)
{
    if (!block)
        return;
    block->clearNewlyAllocated();
}

struct ClearNewlyAllocated : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->clearNewlyAllocated(); }
};

#ifndef NDEBUG
struct VerifyNewlyAllocated : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { ASSERT(!block->clearNewlyAllocated()); }
};
#endif

void MarkedSpace::clearNewlyAllocated()
{
    for (size_t i = 0; i < preciseCount; ++i) {
        clearNewlyAllocatedInBlock(m_normalSpace.preciseAllocators[i].takeLastActiveBlock());
        clearNewlyAllocatedInBlock(m_destructorSpace.preciseAllocators[i].takeLastActiveBlock());
    }

    for (size_t i = 0; i < impreciseCount; ++i) {
        clearNewlyAllocatedInBlock(m_normalSpace.impreciseAllocators[i].takeLastActiveBlock());
        clearNewlyAllocatedInBlock(m_destructorSpace.impreciseAllocators[i].takeLastActiveBlock());
    }

    // We have to iterate all of the blocks in the large allocators because they are
    // canonicalized as they are used up (see MarkedAllocator::tryAllocateHelper)
    // which creates the m_newlyAllocated bitmap.
    ClearNewlyAllocated functor;
    m_normalSpace.largeAllocator.forEachBlock(functor);
    m_destructorSpace.largeAllocator.forEachBlock(functor);

#ifndef NDEBUG
    VerifyNewlyAllocated verifyFunctor;
    forEachBlock(verifyFunctor);
#endif
}
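
// Reading of the code above: takeLastActiveBlock() hands back at most one block
// per precise/imprecise allocator (the block it was most recently allocating
// into), and only such blocks carry an m_newlyAllocated bitmap, so those loops
// are cheap. The large allocators can leave the bitmap on any of their blocks,
// hence the full per-block walk. The allocator internals live in MarkedAllocator.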

#ifndef NDEBUG
struct VerifyMarkedOrRetired : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block)
    {
        switch (block->m_state) {
        case MarkedBlock::Marked:
        case MarkedBlock::Retired:
            return;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
};
#endif

void MarkedSpace::clearMarks()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            m_blocksWithNewObjects[i]->clearMarks();
    } else
        forEachBlock<ClearMarks>();

#ifndef NDEBUG
    VerifyMarkedOrRetired verifyFunctor;
    forEachBlock(verifyFunctor);
#endif
}
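
// After an EdenCollection only the newly-populated blocks have their marks
// cleared; marks in the remaining blocks persist, which is presumably how
// objects that survived earlier collections stay known-live across generational
// cycles. The debug walk then checks that every block ends up in the Marked or
// Retired state either way.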

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}
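
// Callers bracket heap iteration with this pair so that allocation is stopped
// while cells are being walked. In this code base that bracketing is presumably
// done through a RAII helper along these lines (the name HeapIterationScope is
// recalled from the surrounding sources, not something defined in this file):
//
//     {
//         HeapIterationScope iterationScope(*heap); // calls willStartIterating()
//         // ... safely iterate blocks / cells here ...
//     } // destructor calls didFinishIterating()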

} // namespace JSC