/*
 *  Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 *  Copyright (C) 2001 Peter Kelly (pmk@post.com)
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "CardSet.h"
#include "HeapBlock.h"

#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>
// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
        dataLog( \
            "%s:%d %s: block %s = %p, %d\n", \
            __FILE__, __LINE__, __FUNCTION__, \
            #block, (block), (block)->m_state); \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif
namespace JSC {

    class Heap;
    class JSCell;

    typedef uintptr_t Bits;

    static const size_t MB = 1024 * 1024;

    bool isZapped(const JSCell*);
    // A marked block is a page-aligned container for heap-allocated objects.
    // Objects are allocated within cells of the marked block. For a given
    // marked block, all cells have the same size. Objects smaller than the
    // cell size may be allocated in the marked block, in which case the
    // allocation suffers from internal fragmentation: wasted space whose
    // size is equal to the difference between the cell size and the object
    // size.
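    //
    // For example (illustrative numbers only): if a block's cells are 64 bytes each
    // and a 40-byte object is placed in one of them, the remaining 64 - 40 = 24 bytes
    // of that cell are internal fragmentation.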
    class MarkedBlock : public HeapBlock {
        friend class WTF::DoublyLinkedListNode<MarkedBlock>;
    public:
        // Ensure natural alignment for native types whilst recognizing that the smallest
        // object the heap will commonly allocate is four words.
        static const size_t atomSize = 4 * sizeof(void*);
        static const size_t atomShift = 5;
        static const size_t blockSize = 64 * KB;
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

        static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
        static const size_t atomMask = atomsPerBlock - 1;
        static const int cardShift = 8; // This is log2 of bytes per card.
        static const size_t bytesPerCard = 1 << cardShift;
        static const int cardCount = blockSize / bytesPerCard;
        static const int cardMask = cardCount - 1;
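        // With the values above, each 64 KB block is divided into 256-byte cards
        // (bytesPerCard == 1 << 8), giving cardCount == 65536 / 256 == 256 cards per
        // block. On a 64-bit build (illustrative assumption), atomSize == 32, so
        // atomsPerBlock == 65536 / 32 == 2048.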
        struct FreeCell {
            FreeCell* next;
        };

        struct FreeList {
            FreeCell* head;
            size_t bytes;

            FreeList();
            FreeList(FreeCell*, size_t);
        };

        struct VoidFunctor {
            typedef void ReturnType;
            void returnValue() { }
        };
        static MarkedBlock* create(Heap*, size_t cellSize, bool cellsNeedDestruction);
        static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize, bool cellsNeedDestruction);
        static void destroy(MarkedBlock*);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        Heap* heap() const;
        enum SweepMode { SweepOnly, SweepToFreeList };
        FreeList sweep(SweepMode = SweepOnly);
        // While allocating from a free list, MarkedBlock temporarily has bogus
        // cell liveness data. To restore accurate cell liveness data, call one
        // of these functions:
        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
        void zapFreeList(const FreeList&); // Call this to undo the free list.
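        //
        // A typical allocation cycle, sketched as a comment (illustrative only; the
        // allocator that drives this lives outside this header):
        //
        //     MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
        //     // ... hand out cells from the free list until it is exhausted ...
        //     block->didConsumeFreeList();
        //     // ... or, if allocation stops early, restore liveness data instead:
        //     block->zapFreeList(freeList);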
        void clearMarks();
        size_t markCount();
        bool markCountIsZero(); // Faster than markCount().

        size_t cellSize();
        bool cellsNeedDestruction();

        size_t size();
        size_t capacity();
        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        bool isLive(const JSCell*);
        bool isLiveCell(const void*);
        void setMarked(const void*);

        void setDirtyObject(const void* atom)
        {
            ASSERT(MarkedBlock::blockFor(atom) == this);
            m_cards.markCardForAtom(atom);
        }

        uint8_t* addressOfCardFor(const void* atom)
        {
            ASSERT(MarkedBlock::blockFor(atom) == this);
            return &m_cards.cardForAtom(atom);
        }
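        // Card arithmetic, for illustration: with cardShift == 8, an object that
        // starts 0x1234 bytes into its block lies in card 0x1234 >> 8 == 0x12, so
        // setDirtyObject() marks that card and addressOfCardFor() returns the address
        // of its byte within m_cards.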
        static inline size_t offsetOfCards()
        {
            return OBJECT_OFFSETOF(MarkedBlock, m_cards);
        }

        static inline size_t offsetOfMarks()
        {
            return OBJECT_OFFSETOF(MarkedBlock, m_marks);
        }
        typedef Vector<JSCell*, 32> DirtyCellVector;
        inline void gatherDirtyCells(DirtyCellVector&);
        template <int size> inline void gatherDirtyCellsWithSize(DirtyCellVector&);

        template <typename Functor> void forEachCell(Functor&);
    private:
        static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

        enum BlockState { New, FreeListed, Allocated, Marked, Zapped };
        template<bool destructorCallNeeded> FreeList sweepHelper(SweepMode = SweepOnly);

        typedef char Atom[atomSize];

        MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction);
        Atom* atoms();
        size_t atomNumber(const void*);
        void callDestructor(JSCell*);
        template<BlockState, SweepMode, bool destructorCallNeeded> FreeList specializedSweep();
        CardSet<bytesPerCard, blockSize> m_cards;

        size_t m_atomsPerCell;
        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
        WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
#else
        WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
        bool m_cellsNeedDestruction;
        BlockState m_state;
        Heap* m_heap;
    };
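    // Sizing note (illustrative, assuming a 64-bit build where atomSize == 32):
    // m_marks holds one bit per atom, i.e. atomsPerBlock == 2048 bits == 256 bytes
    // (the "~0.4% overhead" noted above), and m_cards holds one byte per 256-byte
    // card, adding another 256 bytes of per-block bookkeeping.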
    inline MarkedBlock::FreeList::FreeList()
        : head(0)
        , bytes(0)
    {
    }

    inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
        : head(head)
        , bytes(bytes)
    {
    }
    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }
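    // For example (hypothetical numbers): if sizeof(MarkedBlock) were 600 bytes and
    // atomSize were 32, the header would round up to 608 bytes and firstAtom() would
    // return 19, so the first usable cell begins at byte offset 608.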
    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
    }

    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
    }
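    // blockFor() works because blocks are 64 KB aligned, so masking with blockMask
    // clears the low 16 bits of any interior pointer. Illustrative example: a cell at
    // 0x7f3a1c012345 maps to the MarkedBlock header at 0x7f3a1c010000.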
    inline Heap* MarkedBlock::heap() const
    {
        return m_heap;
    }

    inline void MarkedBlock::didConsumeFreeList()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state == FreeListed);
        m_state = Allocated;
    }
    inline void MarkedBlock::clearMarks()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state != New && m_state != FreeListed);
        m_marks.clearAll();

        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
    }
    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline bool MarkedBlock::markCountIsZero()
    {
        return m_marks.isEmpty();
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

    inline bool MarkedBlock::cellsNeedDestruction()
    {
        return m_cellsNeedDestruction;
    }

    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return m_allocation.size();
    }
    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
    }

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.concurrentTestAndSet(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }
    inline bool MarkedBlock::isLive(const JSCell* cell)
    {
        switch (m_state) {
        case Allocated:
            return true;
        case Zapped:
            if (isZapped(cell)) {
                // Object dead in previous collection, not allocated since previous collection: mark bit should not be set.
                ASSERT(!m_marks.get(atomNumber(cell)));
                return false;
            }

            // Newly allocated objects: mark bit not set.
            // Objects that survived prior collection: mark bit set.
            return m_marks.get(atomNumber(cell));
        case Marked:
            return m_marks.get(atomNumber(cell));

        case New:
        case FreeListed:
            ASSERT_NOT_REACHED();
            return false;
        }

        ASSERT_NOT_REACHED();
        return false;
    }
    inline bool MarkedBlock::isLiveCell(const void* p)
    {
        ASSERT(MarkedBlock::isAtomAligned(p));
        size_t atomNumber = this->atomNumber(p);
        size_t firstAtom = this->firstAtom();
        if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
            return false;

        if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
            return false;

        if (atomNumber >= m_endAtom) // Filters pointers into invalid cells out of the range.
            return false;

        return isLive(static_cast<const JSCell*>(p));
    }
    template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (!isLive(cell))
                continue;

            functor(cell);
        }
    }
    template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells)
    {
        if (m_cards.testAndClear(0)) {
            char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]);
            const char* end = reinterpret_cast<char*>(this) + bytesPerCard;
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += _cellSize;
            }
        }

        const size_t cellOffset = firstAtom() * atomSize % _cellSize;
        for (size_t i = 1; i < m_cards.cardCount; i++) {
            if (!m_cards.testAndClear(i))
                continue;
            char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset;
            char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard;
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += _cellSize;
            }
        }
    }
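    // Note on cellOffset above (illustrative): cells are laid out contiguously from
    // firstAtom() onward, so a 256-byte card need not begin on a cell boundary. This
    // template is only dispatched to for cell sizes that divide bytesPerCard (32 and
    // 64; see gatherDirtyCells() below), so i * bytesPerCard + cellOffset is the first
    // cell boundary at or after the start of card i. For example, if cells started at
    // byte offset 608 (hypothetical) with _cellSize == 64, cellOffset would be
    // 608 % 64 == 32.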
    void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells)
    {
        COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch);

        ASSERT(m_state != New && m_state != FreeListed);

        // This is an optimisation to avoid having to walk the set of marked
        // blocks twice during GC.
        m_state = Marked;

        if (markCountIsZero())
            return;

        size_t cellSize = this->cellSize();
        if (cellSize == 32) {
            gatherDirtyCellsWithSize<32>(dirtyCells);
            return;
        }
        if (cellSize == 64) {
            gatherDirtyCellsWithSize<64>(dirtyCells);
            return;
        }

        const size_t firstCellOffset = firstAtom() * atomSize % cellSize;

        if (m_cards.testAndClear(0)) {
            char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize;
            char* end = reinterpret_cast<char*>(this) + bytesPerCard;
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += cellSize;
            }
        }
        for (size_t i = 1; i < m_cards.cardCount; i++) {
            if (!m_cards.testAndClear(i))
                continue;
            char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize);
            char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize);
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += cellSize;
            }
        }
    }
} // namespace JSC

namespace WTF {

    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
        static unsigned hash(JSC::MarkedBlock* const& key)
        {
            // Aligned VM regions tend to be monotonically increasing integers,
            // which is a great hash function, but we have to remove the low bits,
            // since they're always zero, which is a terrible hash function!
            return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
        }
    };
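    // Illustrative example of the hash above: neighbouring 64 KB-aligned blocks at
    // 0x7f3a1c010000 and 0x7f3a1c020000 hash to the consecutive values 0x7f3a1c01 and
    // 0x7f3a1c02, with the always-zero low 16 bits divided away.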
    template<> struct DefaultHash<JSC::MarkedBlock*> {
        typedef MarkedBlockHash Hash;
    };

} // namespace WTF

#endif // MarkedBlock_h