/*
 *  Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 *  Copyright (C) 2001 Peter Kelly (pmk@post.com)
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "CardSet.h"
#include "HeapBlock.h"

#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {     \
        dataLog(                                        \
            "%s:%d %s: block %s = %p, %d\n",            \
            __FILE__, __LINE__, __FUNCTION__,           \
            #block, (block), (block)->m_state);         \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

class Heap;
class JSCell;

typedef uintptr_t Bits;

static const size_t MB = 1024 * 1024;

bool isZapped(const JSCell*);

// A marked block is a page-aligned container for heap-allocated objects.
// Objects are allocated within cells of the marked block. For a given
// marked block, all cells have the same size. Objects smaller than the
// cell size may be allocated in the marked block, in which case the
// allocation suffers from internal fragmentation: wasted space whose
// size is equal to the difference between the cell size and the object
// size.
class MarkedBlock : public HeapBlock {
    friend class WTF::DoublyLinkedListNode<MarkedBlock>;
public:
    // Ensure natural alignment for native types whilst recognizing that the smallest
    // object the heap will commonly allocate is four words.
    static const size_t atomSize = 4 * sizeof(void*);
    static const size_t atomShift = 5;
    static const size_t blockSize = 64 * KB;
    static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

    static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
    static const size_t atomMask = atomsPerBlock - 1;
    static const int cardShift = 8; // This is log2 of bytes per card.
    static const size_t bytesPerCard = 1 << cardShift;
    static const int cardCount = blockSize / bytesPerCard;
    static const int cardMask = cardCount - 1;

    struct FreeCell {
        FreeCell* next;
    };

    struct FreeList {
        FreeCell* head;
        size_t bytes;

        FreeList();
        FreeList(FreeCell*, size_t);
    };
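
    // The free list is threaded through the dead cells themselves: each free
    // cell's first word points at the next free cell. A hypothetical sketch
    // of popping one cell (illustrative only; the real allocation fast path
    // lives outside this header):
    //
    //     void* popCell(MarkedBlock::FreeList& freeList)
    //     {
    //         MarkedBlock::FreeCell* cell = freeList.head;
    //         if (!cell)
    //             return 0; // Exhausted: sweep this block or pick another.
    //         freeList.head = cell->next;
    //         return cell;
    //     }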
97 | ||
98 | struct VoidFunctor { | |
99 | typedef void ReturnType; | |
100 | void returnValue() { } | |
101 | }; | |
102 | ||
103 | static MarkedBlock* create(Heap*, size_t cellSize, bool cellsNeedDestruction); | |
104 | static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize, bool cellsNeedDestruction); | |
14957cd0 A |
105 | static void destroy(MarkedBlock*); |
106 | ||
107 | static bool isAtomAligned(const void*); | |
108 | static MarkedBlock* blockFor(const void*); | |
109 | static size_t firstAtom(); | |
110 | ||
111 | Heap* heap() const; | |
14957cd0 A |
112 | |
113 | void* allocate(); | |
6fe7ccc8 A |
114 | |
115 | enum SweepMode { SweepOnly, SweepToFreeList }; | |
116 | FreeList sweep(SweepMode = SweepOnly); | |
117 | ||
118 | // While allocating from a free list, MarkedBlock temporarily has bogus | |
119 | // cell liveness data. To restore accurate cell liveness data, call one | |
120 | // of these functions: | |
121 | void didConsumeFreeList(); // Call this once you've allocated all the items in the free list. | |
122 | void zapFreeList(const FreeList&); // Call this to undo the free list. | |
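    //
    // An illustrative calling protocol (a sketch inferred from the API above,
    // not code from this header):
    //
    //     MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
    //     // ... hand out cells from freeList ...
    //     block->didConsumeFreeList();  // after handing out every cell
    //     // -- or, to stop early --
    //     block->zapFreeList(freeList); // restores accurate liveness data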

    void clearMarks();
    size_t markCount();
    bool markCountIsZero(); // Faster than markCount().

    size_t cellSize();
    bool cellsNeedDestruction();

    size_t size();
    size_t capacity();

    bool isMarked(const void*);
    bool testAndSetMarked(const void*);
    bool isLive(const JSCell*);
    bool isLiveCell(const void*);
    void setMarked(const void*);

#if ENABLE(GGC)
    void setDirtyObject(const void* atom)
    {
        ASSERT(MarkedBlock::blockFor(atom) == this);
        m_cards.markCardForAtom(atom);
    }

    uint8_t* addressOfCardFor(const void* atom)
    {
        ASSERT(MarkedBlock::blockFor(atom) == this);
        return &m_cards.cardForAtom(atom);
    }

    static inline size_t offsetOfCards()
    {
        return OBJECT_OFFSETOF(MarkedBlock, m_cards);
    }

    static inline size_t offsetOfMarks()
    {
        return OBJECT_OFFSETOF(MarkedBlock, m_marks);
    }

    typedef Vector<JSCell*, 32> DirtyCellVector;
    inline void gatherDirtyCells(DirtyCellVector&);
    template <int size> inline void gatherDirtyCellsWithSize(DirtyCellVector&);
#endif
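
    // Card arithmetic, illustrated (an inferred example, not a comment from
    // the original header): cardShift == 8 means each card covers 256 bytes,
    // so a store to byte offset 0x1234 within a block dirties card
    // 0x1234 >> 8 == 0x12; a write barrier can find the card with a shift.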
167 | ||
168 | template <typename Functor> void forEachCell(Functor&); | |
14957cd0 A |
169 | |
170 | private: | |
6fe7ccc8 | 171 | static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two. |
14957cd0 | 172 | |
6fe7ccc8 A |
173 | enum BlockState { New, FreeListed, Allocated, Marked, Zapped }; |
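    // A rough lifecycle, inferred from the assertions in the inline functions
    // below rather than stated by the original header: didConsumeFreeList()
    // moves FreeListed -> Allocated; clearMarks() moves any swept state ->
    // Marked; sweeping and zapFreeList() yield FreeListed and Zapped blocks.
    // New and FreeListed blocks never have meaningful mark bits (see isLive()).
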
    template<bool destructorCallNeeded> FreeList sweepHelper(SweepMode = SweepOnly);

    typedef char Atom[atomSize];

    MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction);
    Atom* atoms();
    size_t atomNumber(const void*);
    void callDestructor(JSCell*);
    template<BlockState, SweepMode, bool destructorCallNeeded> FreeList specializedSweep();

#if ENABLE(GGC)
    CardSet<bytesPerCard, blockSize> m_cards;
#endif

    size_t m_atomsPerCell;
    size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
    WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
#else
    WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
    bool m_cellsNeedDestruction;
    BlockState m_state;
    Heap* m_heap;
};
199 | ||
6fe7ccc8 A |
200 | inline MarkedBlock::FreeList::FreeList() |
201 | : head(0) | |
202 | , bytes(0) | |
203 | { | |
204 | } | |
205 | ||
206 | inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes) | |
207 | : head(head) | |
208 | , bytes(bytes) | |
209 | { | |
210 | } | |
211 | ||
14957cd0 A |
212 | inline size_t MarkedBlock::firstAtom() |
213 | { | |
214 | return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize; | |
215 | } | |
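
// Illustrative arithmetic (the real value depends on this build's
// sizeof(MarkedBlock)): if the header occupied, say, 300 bytes, the payload
// would begin at atom roundUpToMultipleOf<32>(300) / 32 == 320 / 32 == 10.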
216 | ||
217 | inline MarkedBlock::Atom* MarkedBlock::atoms() | |
218 | { | |
219 | return reinterpret_cast<Atom*>(this); | |
220 | } | |
221 | ||
222 | inline bool MarkedBlock::isAtomAligned(const void* p) | |
223 | { | |
6fe7ccc8 | 224 | return !(reinterpret_cast<Bits>(p) & atomAlignmentMask); |
14957cd0 A |
225 | } |
226 | ||
227 | inline MarkedBlock* MarkedBlock::blockFor(const void* p) | |
228 | { | |
6fe7ccc8 | 229 | return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask); |
14957cd0 A |
230 | } |
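
// Worked example with illustrative addresses: blockSize == 64 * KB, so
// blockMask clears the low 16 bits of a pointer; a cell at 0x7f1234561280
// therefore maps to the MarkedBlock header at 0x7f1234560000. This is how an
// interior pointer finds its block's metadata in constant time.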
231 | ||
232 | inline Heap* MarkedBlock::heap() const | |
233 | { | |
234 | return m_heap; | |
235 | } | |
236 | ||
6fe7ccc8 | 237 | inline void MarkedBlock::didConsumeFreeList() |
14957cd0 | 238 | { |
6fe7ccc8 | 239 | HEAP_LOG_BLOCK_STATE_TRANSITION(this); |
14957cd0 | 240 | |
6fe7ccc8 A |
241 | ASSERT(m_state == FreeListed); |
242 | m_state = Allocated; | |
14957cd0 A |
243 | } |
244 | ||
6fe7ccc8 | 245 | inline void MarkedBlock::clearMarks() |
14957cd0 | 246 | { |
6fe7ccc8 | 247 | HEAP_LOG_BLOCK_STATE_TRANSITION(this); |
14957cd0 | 248 | |
6fe7ccc8 A |
249 | ASSERT(m_state != New && m_state != FreeListed); |
250 | m_marks.clearAll(); | |
251 | ||
252 | // This will become true at the end of the mark phase. We set it now to | |
253 | // avoid an extra pass to do so later. | |
254 | m_state = Marked; | |
14957cd0 A |
255 | } |
256 | ||
6fe7ccc8 | 257 | inline size_t MarkedBlock::markCount() |
14957cd0 | 258 | { |
6fe7ccc8 | 259 | return m_marks.count(); |
14957cd0 A |
260 | } |
261 | ||
6fe7ccc8 | 262 | inline bool MarkedBlock::markCountIsZero() |
14957cd0 A |
263 | { |
264 | return m_marks.isEmpty(); | |
265 | } | |
266 | ||
6fe7ccc8 | 267 | inline size_t MarkedBlock::cellSize() |
14957cd0 | 268 | { |
6fe7ccc8 | 269 | return m_atomsPerCell * atomSize; |
14957cd0 A |
270 | } |
271 | ||
6fe7ccc8 | 272 | inline bool MarkedBlock::cellsNeedDestruction() |
14957cd0 | 273 | { |
6fe7ccc8 | 274 | return m_cellsNeedDestruction; |
14957cd0 A |
275 | } |
276 | ||
277 | inline size_t MarkedBlock::size() | |
278 | { | |
279 | return markCount() * cellSize(); | |
280 | } | |
281 | ||
282 | inline size_t MarkedBlock::capacity() | |
283 | { | |
284 | return m_allocation.size(); | |
285 | } | |
286 | ||

inline size_t MarkedBlock::atomNumber(const void* p)
{
    return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
}

inline bool MarkedBlock::isMarked(const void* p)
{
    return m_marks.get(atomNumber(p));
}

inline bool MarkedBlock::testAndSetMarked(const void* p)
{
    return m_marks.concurrentTestAndSet(atomNumber(p));
}

inline void MarkedBlock::setMarked(const void* p)
{
    m_marks.set(atomNumber(p));
}

inline bool MarkedBlock::isLive(const JSCell* cell)
{
    switch (m_state) {
    case Allocated:
        return true;
    case Zapped:
        if (isZapped(cell)) {
            // Object dead in previous collection, not allocated since previous
            // collection: mark bit should not be set.
            ASSERT(!m_marks.get(atomNumber(cell)));
            return false;
        }

        // Newly allocated objects: mark bit not set.
        // Objects that survived prior collection: mark bit set.
        return true;
    case Marked:
        return m_marks.get(atomNumber(cell));

    case New:
    case FreeListed:
        ASSERT_NOT_REACHED();
        return false;
    }

    ASSERT_NOT_REACHED();
    return false;
}

inline bool MarkedBlock::isLiveCell(const void* p)
{
    ASSERT(MarkedBlock::isAtomAligned(p));
    size_t atomNumber = this->atomNumber(p);
    size_t firstAtom = this->firstAtom();
    if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
        return false;
    if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
        return false;
    if (atomNumber >= m_endAtom) // Filters pointers past the last valid cell.
        return false;

    return isLive(static_cast<const JSCell*>(p));
}
349 | ||
350 | template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor) | |
14957cd0 A |
351 | { |
352 | for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) { | |
6fe7ccc8 A |
353 | JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]); |
354 | if (!isLive(cell)) | |
14957cd0 | 355 | continue; |
6fe7ccc8 A |
356 | |
357 | functor(cell); | |
14957cd0 A |
358 | } |
359 | } | |
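
// Illustrative use of forEachCell (CountLiveCells is a made-up functor, not
// part of this header; VoidFunctor above shows the ReturnType convention):
//
//     struct CountLiveCells : MarkedBlock::VoidFunctor {
//         size_t count;
//         CountLiveCells() : count(0) { }
//         void operator()(JSCell*) { ++count; }
//     };
//
//     CountLiveCells functor;
//     block->forEachCell(functor);
//     // functor.count is now the number of live cells in the block.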
360 | ||
6fe7ccc8 A |
361 | #if ENABLE(GGC) |
362 | template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells) | |
363 | { | |
364 | if (m_cards.testAndClear(0)) { | |
365 | char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]); | |
366 | const char* end = reinterpret_cast<char*>(this) + bytesPerCard; | |
367 | while (ptr < end) { | |
368 | JSCell* cell = reinterpret_cast<JSCell*>(ptr); | |
369 | if (isMarked(cell)) | |
370 | dirtyCells.append(cell); | |
371 | ptr += _cellSize; | |
372 | } | |
373 | } | |
374 | ||
375 | const size_t cellOffset = firstAtom() * atomSize % _cellSize; | |
376 | for (size_t i = 1; i < m_cards.cardCount; i++) { | |
377 | if (!m_cards.testAndClear(i)) | |
378 | continue; | |
379 | char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset; | |
380 | char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard; | |
381 | ||
382 | while (ptr < end) { | |
383 | JSCell* cell = reinterpret_cast<JSCell*>(ptr); | |
384 | if (isMarked(cell)) | |
385 | dirtyCells.append(cell); | |
386 | ptr += _cellSize; | |
387 | } | |
388 | } | |
389 | } | |
390 | ||
391 | void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells) | |
392 | { | |
393 | COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch); | |
394 | ||
395 | ASSERT(m_state != New && m_state != FreeListed); | |
396 | ||
397 | // This is an optimisation to avoid having to walk the set of marked | |
398 | // blocks twice during GC. | |
399 | m_state = Marked; | |
400 | ||
401 | if (markCountIsZero()) | |
402 | return; | |
403 | ||
404 | size_t cellSize = this->cellSize(); | |
405 | if (cellSize == 32) { | |
406 | gatherDirtyCellsWithSize<32>(dirtyCells); | |
407 | return; | |
408 | } | |
409 | if (cellSize == 64) { | |
410 | gatherDirtyCellsWithSize<64>(dirtyCells); | |
411 | return; | |
412 | } | |
413 | ||
414 | const size_t firstCellOffset = firstAtom() * atomSize % cellSize; | |
415 | ||
416 | if (m_cards.testAndClear(0)) { | |
417 | char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize; | |
418 | char* end = reinterpret_cast<char*>(this) + bytesPerCard; | |
419 | while (ptr < end) { | |
420 | JSCell* cell = reinterpret_cast<JSCell*>(ptr); | |
421 | if (isMarked(cell)) | |
422 | dirtyCells.append(cell); | |
423 | ptr += cellSize; | |
424 | } | |
425 | } | |
426 | for (size_t i = 1; i < m_cards.cardCount; i++) { | |
427 | if (!m_cards.testAndClear(i)) | |
428 | continue; | |
429 | char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize); | |
430 | char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize); | |
431 | ||
432 | while (ptr < end) { | |
433 | JSCell* cell = reinterpret_cast<JSCell*>(ptr); | |
434 | if (isMarked(cell)) | |
435 | dirtyCells.append(cell); | |
436 | ptr += cellSize; | |
437 | } | |
438 | } | |
439 | } | |
440 | #endif | |
441 | ||
14957cd0 A |
442 | } // namespace JSC |
443 | ||
namespace WTF {

struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
    static unsigned hash(JSC::MarkedBlock* const& key)
    {
        // Aligned VM regions tend to be monotonically increasing integers,
        // which is a great hash function, but we have to remove the low bits,
        // since they're always zero, which is a terrible hash function!
        return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
    }
};
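
// Worked example with illustrative addresses: adjacent 64 KB blocks at
// 0x7f0000000000 and 0x7f0000010000 hash to 0x7f000000 and 0x7f000001,
// consecutive integers, whereas the raw pointers always share sixteen
// trailing zero bits, which would make a poor hash on their own.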
455 | ||
456 | template<> struct DefaultHash<JSC::MarkedBlock*> { | |
457 | typedef MarkedBlockHash Hash; | |
458 | }; | |
459 | ||
460 | } // namespace WTF | |
461 | ||
462 | #endif // MarkedBlock_h |