/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include "config.h"
#include "MarkedSpace.h"

#include "IncrementalSweeper.h"
#include "JSGlobalObject.h"
#include "JSCInlines.h"

namespace JSC {
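// Free is applied to every block via forEachBlock. In FreeAll mode (used by
// the MarkedSpace destructor) it frees each block outright; in FreeOrShrink
// mode (used by shrink()) it frees empty blocks and shrinks the rest.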
class Free {
public:
    typedef MarkedBlock* ReturnType;

    enum FreeMode { FreeOrShrink, FreeAll };

    Free(FreeMode, MarkedSpace*);
    void operator()(MarkedBlock*);
    ReturnType returnValue();

private:
    FreeMode m_freeMode;
    MarkedSpace* m_markedSpace;
    DoublyLinkedList<MarkedBlock> m_blocks;
};
inline Free::Free(FreeMode freeMode, MarkedSpace* newSpace)
    : m_freeMode(freeMode)
    , m_markedSpace(newSpace)
{
}
inline void Free::operator()(MarkedBlock* block)
{
    if (m_freeMode == FreeOrShrink)
        m_markedSpace->freeOrShrinkBlock(block);
    else
        m_markedSpace->freeBlock(block);
}
inline Free::ReturnType Free::returnValue()
{
    return m_blocks.head();
}
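// Block functors that forward to the per-block weak sets; weak sets are
// visited during marking and reaped afterwards (see visitWeakSets() and
// reapWeakSets() below).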
struct VisitWeakSet : MarkedBlock::VoidFunctor {
    VisitWeakSet(HeapRootVisitor& heapRootVisitor) : m_heapRootVisitor(heapRootVisitor) { }
    void operator()(MarkedBlock* block) { block->visitWeakSet(m_heapRootVisitor); }

private:
    HeapRootVisitor& m_heapRootVisitor;
};
struct ReapWeakSet : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->reapWeakSet(); }
};
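// MarkedSpace segregates cells by size class: a bank of fine-grained
// "precise" allocators, a bank of coarser "imprecise" allocators, and a
// large allocator (cell size 0) for anything bigger. Each bank exists twice,
// once for cells without destructors and once for cells that need them.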
MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
    , m_capacity(0)
    , m_isIterating(false)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        allocatorFor(cellSize).init(heap, this, cellSize, false);
        destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
    }

    for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        allocatorFor(cellSize).init(heap, this, cellSize, false);
        destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
    }

    m_normalSpace.largeAllocator.init(heap, this, 0, false);
    m_destructorSpace.largeAllocator.init(heap, this, 0, true);
}
MarkedSpace::~MarkedSpace()
{
    Free free(Free::FreeAll, this);
    forEachBlock(free);
    ASSERT(!m_blocks.set().size());
}
struct LastChanceToFinalize {
    void operator()(MarkedAllocator& allocator) { allocator.lastChanceToFinalize(); }
};
void MarkedSpace::lastChanceToFinalize()
{
    stopAllocating();
    forEachAllocator<LastChanceToFinalize>();
}
void MarkedSpace::sweep()
{
    m_heap->sweeper()->willFinishSweeping();
    forEachBlock<Sweep>();
}
void MarkedSpace::zombifySweep()
{
    if (Options::logGC())
        dataLog("Zombifying sweep...");
    m_heap->sweeper()->willFinishSweeping();
    forEachBlock<ZombifySweep>();
}
void MarkedSpace::resetAllocators()
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        allocatorFor(cellSize).reset();
        destructorAllocatorFor(cellSize).reset();
    }

    for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        allocatorFor(cellSize).reset();
        destructorAllocatorFor(cellSize).reset();
    }

    m_normalSpace.largeAllocator.reset();
    m_destructorSpace.largeAllocator.reset();

    m_blocksWithNewObjects.clear();
}
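// An Eden collection only needs to visit the weak sets of blocks that
// received new objects since the last collection; a full collection walks
// every block.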
void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
{
    VisitWeakSet visitWeakSet(heapRootVisitor);
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            visitWeakSet(m_blocksWithNewObjects[i]);
    } else
        forEachBlock(visitWeakSet);
}
void MarkedSpace::reapWeakSets()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            m_blocksWithNewObjects[i]->reapWeakSet();
    } else
        forEachBlock<ReapWeakSet>();
}
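// Applies the functor to every allocator in the space: both banks of precise
// and imprecise size classes plus the two large allocators.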
template <typename Functor>
void MarkedSpace::forEachAllocator()
{
    Functor functor;
    forEachAllocator(functor);
}
template <typename Functor>
void MarkedSpace::forEachAllocator(Functor& functor)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        functor(allocatorFor(cellSize));
        functor(destructorAllocatorFor(cellSize));
    }

    for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        functor(allocatorFor(cellSize));
        functor(destructorAllocatorFor(cellSize));
    }

    functor(m_normalSpace.largeAllocator);
    functor(m_destructorSpace.largeAllocator);
}
struct StopAllocatingFunctor {
    void operator()(MarkedAllocator& allocator) { allocator.stopAllocating(); }
};
void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachAllocator<StopAllocatingFunctor>();
}
struct ResumeAllocatingFunctor {
    void operator()(MarkedAllocator& allocator) { allocator.resumeAllocating(); }
};
void MarkedSpace::resumeAllocating()
{
    ASSERT(isIterating());
    forEachAllocator<ResumeAllocatingFunctor>();
}
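// Treats the whole space as paged out as soon as any one allocator reports
// that it could not be probed before the deadline.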
bool MarkedSpace::isPagedOut(double deadline)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        if (allocatorFor(cellSize).isPagedOut(deadline)
            || destructorAllocatorFor(cellSize).isPagedOut(deadline))
            return true;
    }

    for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        if (allocatorFor(cellSize).isPagedOut(deadline)
            || destructorAllocatorFor(cellSize).isPagedOut(deadline))
            return true;
    }

    if (m_normalSpace.largeAllocator.isPagedOut(deadline)
        || m_destructorSpace.largeAllocator.isPagedOut(deadline))
        return true;

    return false;
}
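// Removes the block from its allocator and from the space's block set and
// capacity accounting before destroying it.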
void MarkedSpace::freeBlock(MarkedBlock* block)
{
    block->allocator()->removeBlock(block);
    m_capacity -= block->capacity();
    m_blocks.remove(block);
    MarkedBlock::destroy(block);
}
void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}
struct Shrink : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->shrink(); }
};
void MarkedSpace::shrink()
{
    Free freeOrShrink(Free::FreeOrShrink, this);
    forEachBlock(freeOrShrink);
}
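// takeLastActiveBlock() can yield null when an allocator has no block in
// flight, hence the null check in this helper.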
static void clearNewlyAllocatedInBlock(MarkedBlock* block)
{
    if (!block)
        return;
    block->clearNewlyAllocated();
}
struct ClearNewlyAllocated : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->clearNewlyAllocated(); }
};
#ifndef NDEBUG
struct VerifyNewlyAllocated : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { ASSERT(!block->clearNewlyAllocated()); }
};
#endif
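// In the precise and imprecise banks only the block an allocator was last
// actively allocating into can carry a newly-allocated bitmap, so clearing
// that one block per allocator suffices; the large allocators need a full
// walk (see the comment below).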
void MarkedSpace::clearNewlyAllocated()
{
    for (size_t i = 0; i < preciseCount; ++i) {
        clearNewlyAllocatedInBlock(m_normalSpace.preciseAllocators[i].takeLastActiveBlock());
        clearNewlyAllocatedInBlock(m_destructorSpace.preciseAllocators[i].takeLastActiveBlock());
    }

    for (size_t i = 0; i < impreciseCount; ++i) {
        clearNewlyAllocatedInBlock(m_normalSpace.impreciseAllocators[i].takeLastActiveBlock());
        clearNewlyAllocatedInBlock(m_destructorSpace.impreciseAllocators[i].takeLastActiveBlock());
    }

    // We have to iterate all of the blocks in the large allocators because they are
    // canonicalized as they are used up (see MarkedAllocator::tryAllocateHelper),
    // which creates the m_newlyAllocated bitmap.
    ClearNewlyAllocated functor;
    m_normalSpace.largeAllocator.forEachBlock(functor);
    m_destructorSpace.largeAllocator.forEachBlock(functor);

#ifndef NDEBUG
    VerifyNewlyAllocated verifyFunctor;
    forEachBlock(verifyFunctor);
#endif
}
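// Debug-only check used by clearMarks(): every block it touches is expected
// to end up in either the Marked or Retired state.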
#ifndef NDEBUG
struct VerifyMarkedOrRetired : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block)
    {
        switch (block->m_state) {
        case MarkedBlock::Marked:
        case MarkedBlock::Retired:
            return;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
};
#endif
void MarkedSpace::clearMarks()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            m_blocksWithNewObjects[i]->clearMarks();
    } else
        forEachBlock<ClearMarks>();

#ifndef NDEBUG
    VerifyMarkedOrRetired verifyFunctor;
    forEachBlock(verifyFunctor);
#endif
}
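// Iterating the heap requires allocation to be stopped so that blocks are in
// a canonical state; these two calls bracket an iteration and flip
// m_isIterating for the benefit of the assertions above.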
void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}
void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

} // namespace JSC