/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "BlockAllocator.h"
#include "HeapBlock.h"

#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to 1 to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {        \
        dataLogF(                                          \
            "%s:%d %s: block %s = %p, %d\n",               \
            __FILE__, __LINE__, __FUNCTION__,              \
            #block, (block), (block)->m_state);            \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif
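
// When enabled, each transition prints one line per call site. With
// illustrative values, the output looks like:
//   MarkedBlock.h:271 didConsumeFreeList: block this = 0x2a0000, 1
// where the trailing integer is the numeric value of the block's m_state.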

namespace JSC {

    class Heap;
    class JSCell;
    class MarkedAllocator;

    typedef uintptr_t Bits;

    static const size_t MB = 1024 * 1024;

    bool isZapped(const JSCell*);

    // A marked block is a page-aligned container for heap-allocated objects.
    // Objects are allocated within cells of the marked block. For a given
    // marked block, all cells have the same size. Objects smaller than the
    // cell size may be allocated in the marked block, in which case the
    // allocation suffers from internal fragmentation: wasted space whose
    // size is equal to the difference between the cell size and the object
    // size.
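    //
    // For example, a 48-byte object allocated in a block with 64-byte cells
    // still occupies a full cell, wasting 64 - 48 = 16 bytes.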

    class MarkedBlock : public HeapBlock<MarkedBlock> {
    public:
        static const size_t atomSize = 8; // bytes
        static const size_t blockSize = 64 * KB;
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

        static const size_t atomsPerBlock = blockSize / atomSize;
        static const size_t atomMask = atomsPerBlock - 1;
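        // With the constants above: 64 KB / 8 bytes = 8192 atoms per block,
        // so atomMask == 8191 (0x1FFF).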

        struct FreeCell {
            FreeCell* next;
        };

        struct FreeList {
            FreeCell* head;
            size_t bytes;

            FreeList();
            FreeList(FreeCell*, size_t);
        };

        struct VoidFunctor {
            typedef void ReturnType;
            void returnValue() { }
        };

        class CountFunctor {
        public:
            typedef size_t ReturnType;

            CountFunctor() : m_count(0) { }
            void count(size_t count) { m_count += count; }
            ReturnType returnValue() { return m_count; }

        private:
            ReturnType m_count;
        };

        enum DestructorType { None, ImmortalStructure, Normal };
        static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        void lastChanceToFinalize();

        MarkedAllocator* allocator() const;
        Heap* heap() const;
        VM* vm() const;
        WeakSet& weakSet();

        enum SweepMode { SweepOnly, SweepToFreeList };
        FreeList sweep(SweepMode = SweepOnly);
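        // SweepOnly reclaims dead cells in place; SweepToFreeList additionally
        // hands them back as a FreeList for allocation (a reading based on the
        // names and the free-list notes below).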

        void shrink();

        void visitWeakSet(HeapRootVisitor&);
        void reapWeakSet();

        // While allocating from a free list, MarkedBlock temporarily has bogus
        // cell liveness data. To restore accurate cell liveness data, call one
        // of these functions:
        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
        void canonicalizeCellLivenessData(const FreeList&);

        void clearMarks();
        size_t markCount();
        bool isEmpty();

        size_t cellSize();
        DestructorType destructorType();

        size_t size();
        size_t capacity();

        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        bool isLive(const JSCell*);
        bool isLiveCell(const void*);
        void setMarked(const void*);
        void clearMarked(const void*);

        bool isNewlyAllocated(const void*);
        void setNewlyAllocated(const void*);
        void clearNewlyAllocated(const void*);

        bool needsSweeping();

        template <typename Functor> void forEachCell(Functor&);
        template <typename Functor> void forEachLiveCell(Functor&);
        template <typename Functor> void forEachDeadCell(Functor&);
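
        // Illustrative sketch (not part of the original header): the forEach*
        // templates invoke functor(cell) for each qualifying cell, so a caller
        // could count live cells by pairing CountFunctor with an operator():
        //
        //     struct LiveCellCounter : MarkedBlock::CountFunctor {
        //         void operator()(JSCell*) { count(1); }
        //     };
        //
        //     LiveCellCounter counter;
        //     block->forEachLiveCell(counter);
        //     size_t liveCells = counter.returnValue();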

    private:
        static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

        enum BlockState { New, FreeListed, Allocated, Marked };
        template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);

        typedef char Atom[atomSize];

        MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
        Atom* atoms();
        size_t atomNumber(const void*);
        void callDestructor(JSCell*);
        template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();

        size_t m_atomsPerCell;
        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
        WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
#else
        WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
        OwnPtr<WTF::Bitmap<atomsPerBlock> > m_newlyAllocated;

        DestructorType m_destructorType;
        MarkedAllocator* m_allocator;
        BlockState m_state;
        WeakSet m_weakSet;
    };

    inline MarkedBlock::FreeList::FreeList()
        : head(0)
        , bytes(0)
    {
    }

    inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
        : head(head)
        , bytes(bytes)
    {
    }

    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }
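
    // Illustration only (the real value depends on sizeof(MarkedBlock)): if the
    // header occupied 248 bytes, roundUpToMultipleOf<8> would yield 256, so the
    // first atom usable for cells would be atom 256 / 8 = 32.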

    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
    }

    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
    }
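
    // Because blocks are blockSize-aligned, masking off the low 16 bits of any
    // interior pointer recovers the owning block; e.g. a cell at 0x2ab4c0 maps
    // to the MarkedBlock at 0x2a0000.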

    inline void MarkedBlock::lastChanceToFinalize()
    {
        m_weakSet.lastChanceToFinalize();

        clearMarks();
        sweep();
    }

    inline MarkedAllocator* MarkedBlock::allocator() const
    {
        return m_allocator;
    }

    inline Heap* MarkedBlock::heap() const
    {
        return m_weakSet.heap();
    }

    inline VM* MarkedBlock::vm() const
    {
        return m_weakSet.vm();
    }

    inline WeakSet& MarkedBlock::weakSet()
    {
        return m_weakSet;
    }

    inline void MarkedBlock::shrink()
    {
        m_weakSet.shrink();
    }

    inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
    {
        m_weakSet.visit(heapRootVisitor);
    }

    inline void MarkedBlock::reapWeakSet()
    {
        m_weakSet.reap();
    }

    inline void MarkedBlock::didConsumeFreeList()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state == FreeListed);
        m_state = Allocated;
    }

    inline void MarkedBlock::clearMarks()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state != New && m_state != FreeListed);
        m_marks.clearAll();
        m_newlyAllocated.clear();

        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
    }

    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline bool MarkedBlock::isEmpty()
    {
        return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

    inline MarkedBlock::DestructorType MarkedBlock::destructorType()
    {
        return m_destructorType;
    }

    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return region()->blockSize();
    }

    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
    }
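
    // For example, a pointer 320 bytes past the start of the block lies in
    // atom number 320 / 8 = 40.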

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.concurrentTestAndSet(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }

    inline void MarkedBlock::clearMarked(const void* p)
    {
        ASSERT(m_marks.get(atomNumber(p)));
        m_marks.clear(atomNumber(p));
    }

    inline bool MarkedBlock::isNewlyAllocated(const void* p)
    {
        return m_newlyAllocated->get(atomNumber(p));
    }

    inline void MarkedBlock::setNewlyAllocated(const void* p)
    {
        m_newlyAllocated->set(atomNumber(p));
    }

    inline void MarkedBlock::clearNewlyAllocated(const void* p)
    {
        m_newlyAllocated->clear(atomNumber(p));
    }

    inline bool MarkedBlock::isLive(const JSCell* cell)
    {
        switch (m_state) {
        case Allocated:
            return true;

        case Marked:
            return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));

        case New:
        case FreeListed:
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

    inline bool MarkedBlock::isLiveCell(const void* p)
    {
        ASSERT(MarkedBlock::isAtomAligned(p));
        size_t atomNumber = this->atomNumber(p);
        size_t firstAtom = this->firstAtom();
        if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
            return false;
        if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
            return false;
        if (atomNumber >= m_endAtom) // Filters pointers past the end of the last valid cell.
            return false;

        return isLive(static_cast<const JSCell*>(p));
    }

    template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            functor(cell);
        }
    }

    template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (!isLive(cell))
                continue;

            functor(cell);
        }
    }

    template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (isLive(cell))
                continue;

            functor(cell);
        }
    }

    inline bool MarkedBlock::needsSweeping()
    {
        return m_state == Marked;
    }

} // namespace JSC

namespace WTF {

    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
        static unsigned hash(JSC::MarkedBlock* const& key)
        {
            // Aligned VM regions tend to be monotonically increasing integers,
            // which is a great hash function, but we have to remove the low bits,
            // since they're always zero, which is a terrible hash function!
            return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
        }
    };
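
    // For example, a block at 0x2a0000 hashes to 0x2a0000 / 0x10000 == 42: the
    // informative high bits survive, the always-zero low bits do not.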

    template<> struct DefaultHash<JSC::MarkedBlock*> {
        typedef MarkedBlockHash Hash;
    };

} // namespace WTF

#endif // MarkedBlock_h