/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
25 #include "MachineStackMarker.h"
26 #include "MarkedAllocator.h"
27 #include "MarkedBlock.h"
28 #include "MarkedBlockSet.h"
30 #include <wtf/Bitmap.h>
31 #include <wtf/DoublyLinkedList.h>
32 #include <wtf/HashSet.h>
33 #include <wtf/Noncopyable.h>
34 #include <wtf/RetainPtr.h>
35 #include <wtf/Vector.h>
// Forward declarations: these types are only used by reference, pointer, or
// friendship in this header.
class HeapIterationScope;
class LiveObjectIterator;
class LLIntOffsetsExtractor;
47 struct ClearMarks
: MarkedBlock::VoidFunctor
{
48 void operator()(MarkedBlock
* block
)
54 struct Sweep
: MarkedBlock::VoidFunctor
{
55 void operator()(MarkedBlock
* block
) { block
->sweep(); }
58 struct ZombifySweep
: MarkedBlock::VoidFunctor
{
59 void operator()(MarkedBlock
* block
)
61 if (block
->needsSweeping())
66 struct MarkCount
: MarkedBlock::CountFunctor
{
67 void operator()(MarkedBlock
* block
) { count(block
->markCount()); }
70 struct Size
: MarkedBlock::CountFunctor
{
71 void operator()(MarkedBlock
* block
) { count(block
->markCount() * block
->cellSize()); }
75 WTF_MAKE_NONCOPYABLE(MarkedSpace
);
78 static const size_t preciseStep
= MarkedBlock::atomSize
;
79 static const size_t preciseCutoff
= 128;
80 static const size_t preciseCount
= preciseCutoff
/ preciseStep
;
82 // [ 1024... blockSize ]
83 static const size_t impreciseStep
= 2 * preciseCutoff
;
84 static const size_t impreciseCutoff
= MarkedBlock::blockSize
/ 2;
85 static const size_t impreciseCount
= impreciseCutoff
/ impreciseStep
;
88 std::array
<MarkedAllocator
, preciseCount
> preciseAllocators
;
89 std::array
<MarkedAllocator
, impreciseCount
> impreciseAllocators
;
90 MarkedAllocator largeAllocator
;
95 void lastChanceToFinalize();
97 MarkedAllocator
& firstAllocator();
98 MarkedAllocator
& allocatorFor(size_t);
99 MarkedAllocator
& destructorAllocatorFor(size_t);
100 void* allocateWithDestructor(size_t);
101 void* allocateWithoutDestructor(size_t);
103 Subspace
& subspaceForObjectsWithDestructor() { return m_destructorSpace
; }
104 Subspace
& subspaceForObjectsWithoutDestructor() { return m_normalSpace
; }
106 void resetAllocators();
108 void visitWeakSets(HeapRootVisitor
&);
111 MarkedBlockSet
& blocks() { return m_blocks
; }
113 void willStartIterating();
114 bool isIterating() { return m_isIterating
; }
115 void didFinishIterating();
117 void stopAllocating();
118 void resumeAllocating(); // If we just stopped allocation but we didn't do a collection, we need to resume allocation.
120 typedef HashSet
<MarkedBlock
*>::iterator BlockIterator
;
122 template<typename Functor
> typename
Functor::ReturnType
forEachLiveCell(HeapIterationScope
&, Functor
&);
123 template<typename Functor
> typename
Functor::ReturnType
forEachLiveCell(HeapIterationScope
&);
124 template<typename Functor
> typename
Functor::ReturnType
forEachDeadCell(HeapIterationScope
&, Functor
&);
125 template<typename Functor
> typename
Functor::ReturnType
forEachDeadCell(HeapIterationScope
&);
126 template<typename Functor
> typename
Functor::ReturnType
forEachBlock(Functor
&);
127 template<typename Functor
> typename
Functor::ReturnType
forEachBlock();
130 void freeBlock(MarkedBlock
*);
131 void freeOrShrinkBlock(MarkedBlock
*);
133 void didAddBlock(MarkedBlock
*);
134 void didConsumeFreeList(MarkedBlock
*);
135 void didAllocateInBlock(MarkedBlock
*);
138 void clearNewlyAllocated();
141 size_t objectCount();
145 bool isPagedOut(double deadline
);
148 template<typename T
> void releaseSoon(RetainPtr
<T
>&&);
151 const Vector
<MarkedBlock
*>& blocksWithNewObjects() const { return m_blocksWithNewObjects
; }
154 friend class LLIntOffsetsExtractor
;
157 template<typename Functor
> void forEachAllocator(Functor
&);
158 template<typename Functor
> void forEachAllocator();
160 Subspace m_destructorSpace
;
161 Subspace m_normalSpace
;
166 MarkedBlockSet m_blocks
;
167 Vector
<MarkedBlock
*> m_blocksWithNewObjects
;
170 template<typename Functor
> inline typename
Functor::ReturnType
MarkedSpace::forEachLiveCell(HeapIterationScope
&, Functor
& functor
)
172 ASSERT(isIterating());
173 BlockIterator end
= m_blocks
.set().end();
174 for (BlockIterator it
= m_blocks
.set().begin(); it
!= end
; ++it
) {
175 if ((*it
)->forEachLiveCell(functor
) == IterationStatus::Done
)
178 return functor
.returnValue();
181 template<typename Functor
> inline typename
Functor::ReturnType
MarkedSpace::forEachLiveCell(HeapIterationScope
& scope
)
184 return forEachLiveCell(scope
, functor
);
187 template<typename Functor
> inline typename
Functor::ReturnType
MarkedSpace::forEachDeadCell(HeapIterationScope
&, Functor
& functor
)
189 ASSERT(isIterating());
190 BlockIterator end
= m_blocks
.set().end();
191 for (BlockIterator it
= m_blocks
.set().begin(); it
!= end
; ++it
) {
192 if ((*it
)->forEachDeadCell(functor
) == IterationStatus::Done
)
195 return functor
.returnValue();
198 template<typename Functor
> inline typename
Functor::ReturnType
MarkedSpace::forEachDeadCell(HeapIterationScope
& scope
)
201 return forEachDeadCell(scope
, functor
);
204 inline MarkedAllocator
& MarkedSpace::allocatorFor(size_t bytes
)
207 if (bytes
<= preciseCutoff
)
208 return m_normalSpace
.preciseAllocators
[(bytes
- 1) / preciseStep
];
209 if (bytes
<= impreciseCutoff
)
210 return m_normalSpace
.impreciseAllocators
[(bytes
- 1) / impreciseStep
];
211 return m_normalSpace
.largeAllocator
;
214 inline MarkedAllocator
& MarkedSpace::destructorAllocatorFor(size_t bytes
)
217 if (bytes
<= preciseCutoff
)
218 return m_destructorSpace
.preciseAllocators
[(bytes
- 1) / preciseStep
];
219 if (bytes
<= impreciseCutoff
)
220 return m_destructorSpace
.impreciseAllocators
[(bytes
- 1) / impreciseStep
];
221 return m_destructorSpace
.largeAllocator
;
224 inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes
)
226 return allocatorFor(bytes
).allocate(bytes
);
229 inline void* MarkedSpace::allocateWithDestructor(size_t bytes
)
231 return destructorAllocatorFor(bytes
).allocate(bytes
);
234 template <typename Functor
> inline typename
Functor::ReturnType
MarkedSpace::forEachBlock(Functor
& functor
)
236 for (size_t i
= 0; i
< preciseCount
; ++i
)
237 m_normalSpace
.preciseAllocators
[i
].forEachBlock(functor
);
238 for (size_t i
= 0; i
< impreciseCount
; ++i
)
239 m_normalSpace
.impreciseAllocators
[i
].forEachBlock(functor
);
240 m_normalSpace
.largeAllocator
.forEachBlock(functor
);
242 for (size_t i
= 0; i
< preciseCount
; ++i
)
243 m_destructorSpace
.preciseAllocators
[i
].forEachBlock(functor
);
244 for (size_t i
= 0; i
< impreciseCount
; ++i
)
245 m_destructorSpace
.impreciseAllocators
[i
].forEachBlock(functor
);
246 m_destructorSpace
.largeAllocator
.forEachBlock(functor
);
248 return functor
.returnValue();
251 template <typename Functor
> inline typename
Functor::ReturnType
MarkedSpace::forEachBlock()
254 return forEachBlock(functor
);
257 inline void MarkedSpace::didAddBlock(MarkedBlock
* block
)
259 m_capacity
+= block
->capacity();
263 inline void MarkedSpace::didAllocateInBlock(MarkedBlock
* block
)
266 m_blocksWithNewObjects
.append(block
);
272 inline size_t MarkedSpace::objectCount()
274 return forEachBlock
<MarkCount
>();
277 inline size_t MarkedSpace::size()
279 return forEachBlock
<Size
>();
282 inline size_t MarkedSpace::capacity()
289 #endif // MarkedSpace_h