/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "BlockAllocator.h"
#include "HeapBlock.h"

#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {     \
        dataLogF(                                       \
            "%s:%d %s: block %s = %p, %d\n",            \
            __FILE__, __LINE__, __FUNCTION__,           \
            #block, (block), (block)->m_state);         \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

    class Heap;
    class JSCell;
    class MarkedAllocator;

    typedef uintptr_t Bits;

    static const size_t MB = 1024 * 1024;

    bool isZapped(const JSCell*);

    // A marked block is a page-aligned container for heap-allocated objects.
    // Objects are allocated within cells of the marked block. For a given
    // marked block, all cells have the same size. Objects smaller than the
    // cell size may be allocated in the marked block, in which case the
    // allocation suffers from internal fragmentation: wasted space whose
    // size is equal to the difference between the cell size and the object
    // size.
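    //
    // Example (illustrative numbers): in a block whose cell size is 64 bytes,
    // a 48-byte object still occupies a full 64-byte cell, so 16 bytes of
    // that cell are lost to internal fragmentation.
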
    class MarkedBlock : public HeapBlock<MarkedBlock> {
    public:
        static const size_t atomSize = 8; // bytes
        static const size_t blockSize = 64 * KB;
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

        static const size_t atomsPerBlock = blockSize / atomSize;
        static const size_t atomMask = atomsPerBlock - 1;

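        // Illustrative arithmetic: with blockSize = 64 * KB and atomSize = 8,
        // atomsPerBlock is 65536 / 8 = 8192, and blockMask clears the low 16
        // bits of a pointer, which is how blockFor() below maps any cell
        // pointer back to its enclosing block.
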
        struct FreeCell {
            FreeCell* next;
        };

        struct FreeList {
            FreeCell* head;
            size_t bytes;

            FreeList();
            FreeList(FreeCell*, size_t);
        };

        struct VoidFunctor {
            typedef void ReturnType;
            void returnValue() { }
        };

        class CountFunctor {
        public:
            typedef size_t ReturnType;

            CountFunctor() : m_count(0) { }
            void count(size_t count) { m_count += count; }
            ReturnType returnValue() { return m_count; }

        private:
            ReturnType m_count;
        };

        enum DestructorType { None, ImmortalStructure, Normal };
        static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        void lastChanceToFinalize();

        MarkedAllocator* allocator() const;
        Heap* heap() const;
        VM* vm() const;
        WeakSet& weakSet();

        enum SweepMode { SweepOnly, SweepToFreeList };
        FreeList sweep(SweepMode = SweepOnly);

        void shrink();

        void visitWeakSet(HeapRootVisitor&);
        void reapWeakSet();

        // While allocating from a free list, MarkedBlock temporarily has bogus
        // cell liveness data. To restore accurate cell liveness data, call one
        // of these functions:
        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
        void canonicalizeCellLivenessData(const FreeList&);
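        //
        // A typical allocation cycle looks like this (illustrative sketch;
        // the caller is normally MarkedAllocator):
        //
        //     MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
        //     // ... pop cells off freeList.head to service allocations ...
        //     block->didConsumeFreeList(); // cell liveness data is accurate again
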
        void clearMarks();
        size_t markCount();
        bool isEmpty();

        size_t cellSize();
        DestructorType destructorType();

        size_t size();
        size_t capacity();

        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        bool isLive(const JSCell*);
        bool isLiveCell(const void*);
        void setMarked(const void*);
        void clearMarked(const void*);

        bool isNewlyAllocated(const void*);
        void setNewlyAllocated(const void*);
        void clearNewlyAllocated(const void*);

        bool needsSweeping();

        template <typename Functor> void forEachCell(Functor&);
        template <typename Functor> void forEachLiveCell(Functor&);
        template <typename Functor> void forEachDeadCell(Functor&);

    private:
        static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

        enum BlockState { New, FreeListed, Allocated, Marked };
        template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);

        typedef char Atom[atomSize];

        MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
        Atom* atoms();
        size_t atomNumber(const void*);
        void callDestructor(JSCell*);
        template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();

        size_t m_atomsPerCell;
        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
        WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
#else
        WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
        OwnPtr<WTF::Bitmap<atomsPerBlock> > m_newlyAllocated;

        DestructorType m_destructorType;
        MarkedAllocator* m_allocator;
        BlockState m_state;
        WeakSet m_weakSet;
    };

    inline MarkedBlock::FreeList::FreeList()
        : head(0)
        , bytes(0)
    {
    }

    inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
        : head(head)
        , bytes(bytes)
    {
    }

    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }

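    // firstAtom() skips the atoms occupied by the MarkedBlock header itself.
    // Illustrative (the real sizeof varies by platform and configuration): if
    // sizeof(MarkedBlock) were 244 bytes, it would round up to 248, making
    // atom 248 / 8 = 31 the first atom available for cells.
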
    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
    }

    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
    }

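    // Because blocks are 64 KB-aligned, masking off a pointer's low 16 bits
    // recovers the block header. Illustrative: a cell at address 0x7f0012348
    // belongs to the MarkedBlock starting at 0x7f0010000.
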
    inline void MarkedBlock::lastChanceToFinalize()
    {
        m_weakSet.lastChanceToFinalize();

        clearMarks();
        sweep();
    }

    inline MarkedAllocator* MarkedBlock::allocator() const
    {
        return m_allocator;
    }

    inline Heap* MarkedBlock::heap() const
    {
        return m_weakSet.heap();
    }

    inline VM* MarkedBlock::vm() const
    {
        return m_weakSet.vm();
    }

    inline WeakSet& MarkedBlock::weakSet()
    {
        return m_weakSet;
    }

    inline void MarkedBlock::shrink()
    {
        m_weakSet.shrink();
    }

    inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
    {
        m_weakSet.visit(heapRootVisitor);
    }

    inline void MarkedBlock::reapWeakSet()
    {
        m_weakSet.reap();
    }

    inline void MarkedBlock::didConsumeFreeList()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state == FreeListed);
        m_state = Allocated;
    }

    inline void MarkedBlock::clearMarks()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state != New && m_state != FreeListed);
        m_marks.clearAll();
        m_newlyAllocated.clear();

        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
    }

    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline bool MarkedBlock::isEmpty()
    {
        return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

    inline MarkedBlock::DestructorType MarkedBlock::destructorType()
    {
        return m_destructorType;
    }

    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return region()->blockSize();
    }

    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
    }

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.concurrentTestAndSet(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }

    inline void MarkedBlock::clearMarked(const void* p)
    {
        ASSERT(m_marks.get(atomNumber(p)));
        m_marks.clear(atomNumber(p));
    }

    inline bool MarkedBlock::isNewlyAllocated(const void* p)
    {
        return m_newlyAllocated->get(atomNumber(p));
    }

    inline void MarkedBlock::setNewlyAllocated(const void* p)
    {
        m_newlyAllocated->set(atomNumber(p));
    }

    inline void MarkedBlock::clearNewlyAllocated(const void* p)
    {
        m_newlyAllocated->clear(atomNumber(p));
    }

    inline bool MarkedBlock::isLive(const JSCell* cell)
    {
        switch (m_state) {
        case Allocated:
            return true;

        case Marked:
            return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));

        case New:
        case FreeListed:
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

    inline bool MarkedBlock::isLiveCell(const void* p)
    {
        ASSERT(MarkedBlock::isAtomAligned(p));
        size_t atomNumber = this->atomNumber(p);
        size_t firstAtom = this->firstAtom();
        if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
            return false;
        if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
            return false;
        if (atomNumber >= m_endAtom) // Filters pointers past the end of the last valid cell.
            return false;

        return isLive(static_cast<const JSCell*>(p));
    }

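    // Illustrative walk-through of the filters in isLiveCell(): with
    // m_atomsPerCell = 4 and firstAtom() = 16, a pointer at atom 18 fails
    // the modulus check ((18 - 16) % 4 == 2), so it points into the middle
    // of the cell beginning at atom 16 and is rejected.
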
    template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            functor(cell);
        }
    }

    template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (!isLive(cell))
                continue;

            functor(cell);
        }
    }

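    // Illustrative use of the functor protocol (LiveCellCounter is a
    // hypothetical client; CountFunctor is defined above):
    //
    //     class LiveCellCounter : public MarkedBlock::CountFunctor {
    //     public:
    //         void operator()(JSCell*) { count(1); }
    //     };
    //
    //     LiveCellCounter counter;
    //     block->forEachLiveCell(counter);
    //     size_t liveCells = counter.returnValue();
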
    template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (isLive(cell))
                continue;

            functor(cell);
        }
    }

    inline bool MarkedBlock::needsSweeping()
    {
        return m_state == Marked;
    }

} // namespace JSC

namespace WTF {

    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
        static unsigned hash(JSC::MarkedBlock* const& key)
        {
            // Aligned VM regions tend to be monotonically increasing integers,
            // which is a great hash function, but we have to remove the low bits,
            // since they're always zero, which is a terrible hash function!
            return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
        }
    };

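    // Illustrative: adjacent blocks at addresses 0x10000 and 0x20000 hash to
    // 1 and 2 respectively, so consecutively allocated blocks land in
    // distinct hash buckets.
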
    template<> struct DefaultHash<JSC::MarkedBlock*> {
        typedef MarkedBlockHash Hash;
    };

} // namespace WTF

#endif // MarkedBlock_h