#ifndef MarkedBlock_h
#define MarkedBlock_h
-#include "CardSet.h"
+#include "BlockAllocator.h"
#include "HeapBlock.h"
+#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
- dataLog( \
+ dataLogF( \
"%s:%d %s: block %s = %p, %d\n", \
__FILE__, __LINE__, __FUNCTION__, \
#block, (block), (block)->m_state); \
class Heap;
class JSCell;
+ class MarkedAllocator;
typedef uintptr_t Bits;
// Objects smaller than the cell size suffer internal fragmentation: wasted space
// whose size is equal to the difference between the cell size and the object
// size.
- class MarkedBlock : public HeapBlock {
- friend class WTF::DoublyLinkedListNode<MarkedBlock>;
+ class MarkedBlock : public HeapBlock<MarkedBlock> {
public:
- // Ensure natural alignment for native types whilst recognizing that the smallest
- // object the heap will commonly allocate is four words.
- static const size_t atomSize = 4 * sizeof(void*);
- static const size_t atomShift = 5;
+ static const size_t atomSize = 8; // bytes
static const size_t blockSize = 64 * KB;
static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
- static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
+ static const size_t atomsPerBlock = blockSize / atomSize;
static const size_t atomMask = atomsPerBlock - 1;
- static const int cardShift = 8; // This is log2 of bytes per card.
- static const size_t bytesPerCard = 1 << cardShift;
- static const int cardCount = blockSize / bytesPerCard;
- static const int cardMask = cardCount - 1;
struct FreeCell {
FreeCell* next;
void returnValue() { }
};
- static MarkedBlock* create(Heap*, size_t cellSize, bool cellsNeedDestruction);
- static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize, bool cellsNeedDestruction);
- static void destroy(MarkedBlock*);
+ class CountFunctor {
+ public:
+ typedef size_t ReturnType;
+
+ CountFunctor() : m_count(0) { }
+ void count(size_t count) { m_count += count; }
+ ReturnType returnValue() { return m_count; }
+
+ private:
+ ReturnType m_count;
+ };
+
+ enum DestructorType { None, ImmortalStructure, Normal };
+ static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);
static bool isAtomAligned(const void*);
static MarkedBlock* blockFor(const void*);
static size_t firstAtom();
+ void lastChanceToFinalize();
+
+ MarkedAllocator* allocator() const;
Heap* heap() const;
+ VM* vm() const;
+ WeakSet& weakSet();
- void* allocate();
-
enum SweepMode { SweepOnly, SweepToFreeList };
FreeList sweep(SweepMode = SweepOnly);
+ void shrink();
+
+ void visitWeakSet(HeapRootVisitor&);
+ void reapWeakSet();
+
// While allocating from a free list, MarkedBlock temporarily has bogus
// cell liveness data. To restore accurate cell liveness data, call one
// of these functions:
void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
- void zapFreeList(const FreeList&); // Call this to undo the free list.
+ void canonicalizeCellLivenessData(const FreeList&);
void clearMarks();
size_t markCount();
- bool markCountIsZero(); // Faster than markCount().
+ bool isEmpty();
size_t cellSize();
- bool cellsNeedDestruction();
+ DestructorType destructorType();
size_t size();
size_t capacity();
bool isLive(const JSCell*);
bool isLiveCell(const void*);
void setMarked(const void*);
-
-#if ENABLE(GGC)
- void setDirtyObject(const void* atom)
- {
- ASSERT(MarkedBlock::blockFor(atom) == this);
- m_cards.markCardForAtom(atom);
- }
+ void clearMarked(const void*);
- uint8_t* addressOfCardFor(const void* atom)
- {
- ASSERT(MarkedBlock::blockFor(atom) == this);
- return &m_cards.cardForAtom(atom);
- }
-
- static inline size_t offsetOfCards()
- {
- return OBJECT_OFFSETOF(MarkedBlock, m_cards);
- }
+ bool isNewlyAllocated(const void*);
+ void setNewlyAllocated(const void*);
+ void clearNewlyAllocated(const void*);
- static inline size_t offsetOfMarks()
- {
- return OBJECT_OFFSETOF(MarkedBlock, m_marks);
- }
-
- typedef Vector<JSCell*, 32> DirtyCellVector;
- inline void gatherDirtyCells(DirtyCellVector&);
- template <int size> inline void gatherDirtyCellsWithSize(DirtyCellVector&);
-#endif
+ bool needsSweeping();
template <typename Functor> void forEachCell(Functor&);
+ template <typename Functor> void forEachLiveCell(Functor&);
+ template <typename Functor> void forEachDeadCell(Functor&);
private:
static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.
- enum BlockState { New, FreeListed, Allocated, Marked, Zapped };
- template<bool destructorCallNeeded> FreeList sweepHelper(SweepMode = SweepOnly);
+ enum BlockState { New, FreeListed, Allocated, Marked };
+ template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);
typedef char Atom[atomSize];
- MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction);
+ MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
Atom* atoms();
size_t atomNumber(const void*);
void callDestructor(JSCell*);
- template<BlockState, SweepMode, bool destructorCallNeeded> FreeList specializedSweep();
+ template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();
-#if ENABLE(GGC)
- CardSet<bytesPerCard, blockSize> m_cards;
-#endif
-
size_t m_atomsPerCell;
size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
#else
WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
- bool m_cellsNeedDestruction;
+ OwnPtr<WTF::Bitmap<atomsPerBlock> > m_newlyAllocated;
+
+ DestructorType m_destructorType;
+ MarkedAllocator* m_allocator;
BlockState m_state;
- Heap* m_heap;
+ WeakSet m_weakSet;
};
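
The free-list protocol that the comment above didConsumeFreeList() describes looks roughly like this from the allocating side. This is a hedged sketch, not code from this patch: it assumes FreeList exposes its head as a FreeCell* (that field is elided from this excerpt), and allocateInto() / remainingFreeList stand in for whatever the real allocator uses.

    MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);

    // Either consume every cell on the list and then restore the liveness bookkeeping...
    for (MarkedBlock::FreeCell* cell = freeList.head; cell; cell = cell->next)
        allocateInto(cell); // hypothetical consumer of the cell
    block->didConsumeFreeList();

    // ...or stop early and hand the unconsumed remainder back instead:
    // block->canonicalizeCellLivenessData(remainingFreeList);
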
inline MarkedBlock::FreeList::FreeList()
return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
}
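
To make the block/atom constants above concrete, here is a small standalone sketch of the address arithmetic that blockFor() and atomNumber() rely on. The constant values mirror the header; the function and names are illustrative only, not part of this patch.

    #include <cstddef>
    #include <cstdint>

    static const std::size_t kBlockSize = 64 * 1024;         // MarkedBlock::blockSize
    static const std::size_t kBlockMask = ~(kBlockSize - 1); // masks off the offset within the block
    static const std::size_t kAtomSize  = 8;                 // MarkedBlock::atomSize

    std::size_t atomNumberOf(const void* p)
    {
        std::uintptr_t bits = reinterpret_cast<std::uintptr_t>(p);
        std::uintptr_t blockBase = bits & kBlockMask;  // what blockFor() computes
        return (bits - blockBase) / kAtomSize;         // what atomNumber() computes
    }
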
+ inline void MarkedBlock::lastChanceToFinalize()
+ {
+ m_weakSet.lastChanceToFinalize();
+
+ clearMarks();
+ sweep();
+ }
+
+ inline MarkedAllocator* MarkedBlock::allocator() const
+ {
+ return m_allocator;
+ }
+
inline Heap* MarkedBlock::heap() const
{
- return m_heap;
+ return m_weakSet.heap();
+ }
+
+ inline VM* MarkedBlock::vm() const
+ {
+ return m_weakSet.vm();
+ }
+
+ inline WeakSet& MarkedBlock::weakSet()
+ {
+ return m_weakSet;
+ }
+
+ inline void MarkedBlock::shrink()
+ {
+ m_weakSet.shrink();
+ }
+
+ inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
+ {
+ m_weakSet.visit(heapRootVisitor);
+ }
+
+ inline void MarkedBlock::reapWeakSet()
+ {
+ m_weakSet.reap();
}
inline void MarkedBlock::didConsumeFreeList()
ASSERT(m_state != New && m_state != FreeListed);
m_marks.clearAll();
+ m_newlyAllocated.clear();
// This will become true at the end of the mark phase. We set it now to
// avoid an extra pass to do so later.
return m_marks.count();
}
- inline bool MarkedBlock::markCountIsZero()
+ inline bool MarkedBlock::isEmpty()
{
- return m_marks.isEmpty();
+ return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
}
inline size_t MarkedBlock::cellSize()
return m_atomsPerCell * atomSize;
}
- inline bool MarkedBlock::cellsNeedDestruction()
+ inline MarkedBlock::DestructorType MarkedBlock::destructorType()
{
- return m_cellsNeedDestruction;
+ return m_destructorType;
}
inline size_t MarkedBlock::size()
inline size_t MarkedBlock::capacity()
{
- return m_allocation.size();
+ return region()->blockSize();
}
inline size_t MarkedBlock::atomNumber(const void* p)
m_marks.set(atomNumber(p));
}
+ inline void MarkedBlock::clearMarked(const void* p)
+ {
+ ASSERT(m_marks.get(atomNumber(p)));
+ m_marks.clear(atomNumber(p));
+ }
+
+ inline bool MarkedBlock::isNewlyAllocated(const void* p)
+ {
+ return m_newlyAllocated->get(atomNumber(p));
+ }
+
+ inline void MarkedBlock::setNewlyAllocated(const void* p)
+ {
+ m_newlyAllocated->set(atomNumber(p));
+ }
+
+ inline void MarkedBlock::clearNewlyAllocated(const void* p)
+ {
+ m_newlyAllocated->clear(atomNumber(p));
+ }
+
inline bool MarkedBlock::isLive(const JSCell* cell)
{
switch (m_state) {
case Allocated:
return true;
- case Zapped:
- if (isZapped(cell)) {
- // Object dead in previous collection, not allocated since previous collection: mark bit should not be set.
- ASSERT(!m_marks.get(atomNumber(cell)));
- return false;
- }
-
- // Newly allocated objects: mark bit not set.
- // Objects that survived prior collection: mark bit set.
- return true;
+
case Marked:
- return m_marks.get(atomNumber(cell));
+ return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
case New:
case FreeListed:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return false;
}
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return false;
}
{
for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
- if (!isLive(cell))
- continue;
-
functor(cell);
}
}
-#if ENABLE(GGC)
-template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells)
-{
- if (m_cards.testAndClear(0)) {
- char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]);
- const char* end = reinterpret_cast<char*>(this) + bytesPerCard;
- while (ptr < end) {
- JSCell* cell = reinterpret_cast<JSCell*>(ptr);
- if (isMarked(cell))
- dirtyCells.append(cell);
- ptr += _cellSize;
- }
- }
-
- const size_t cellOffset = firstAtom() * atomSize % _cellSize;
- for (size_t i = 1; i < m_cards.cardCount; i++) {
- if (!m_cards.testAndClear(i))
- continue;
- char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset;
- char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard;
-
- while (ptr < end) {
- JSCell* cell = reinterpret_cast<JSCell*>(ptr);
- if (isMarked(cell))
- dirtyCells.append(cell);
- ptr += _cellSize;
+ template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
+ {
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+ if (!isLive(cell))
+ continue;
+
+ functor(cell);
}
}
-}
-
-void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells)
-{
- COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch);
- ASSERT(m_state != New && m_state != FreeListed);
-
- // This is an optimisation to avoid having to walk the set of marked
- // blocks twice during GC.
- m_state = Marked;
-
- if (markCountIsZero())
- return;
-
- size_t cellSize = this->cellSize();
- if (cellSize == 32) {
- gatherDirtyCellsWithSize<32>(dirtyCells);
- return;
- }
- if (cellSize == 64) {
- gatherDirtyCellsWithSize<64>(dirtyCells);
- return;
- }
+ template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
+ {
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+ if (isLive(cell))
+ continue;
- const size_t firstCellOffset = firstAtom() * atomSize % cellSize;
-
- if (m_cards.testAndClear(0)) {
- char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize;
- char* end = reinterpret_cast<char*>(this) + bytesPerCard;
- while (ptr < end) {
- JSCell* cell = reinterpret_cast<JSCell*>(ptr);
- if (isMarked(cell))
- dirtyCells.append(cell);
- ptr += cellSize;
+ functor(cell);
}
}
- for (size_t i = 1; i < m_cards.cardCount; i++) {
- if (!m_cards.testAndClear(i))
- continue;
- char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize);
- char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize);
-
- while (ptr < end) {
- JSCell* cell = reinterpret_cast<JSCell*>(ptr);
- if (isMarked(cell))
- dirtyCells.append(cell);
- ptr += cellSize;
- }
+
+ inline bool MarkedBlock::needsSweeping()
+ {
+ return m_state == Marked;
}
-}
-#endif
} // namespace JSC
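
As a usage illustration of the CountFunctor / forEachLiveCell pair introduced above, a functor in that style can tally the live cells of a single block. Sketch only; LiveCellCounter and countLiveCells() are hypothetical names, not part of this patch.

    // Counts live cells in one MarkedBlock, in the CountFunctor style.
    struct LiveCellCounter : MarkedBlock::CountFunctor {
        void operator()(JSCell*) { count(1); }
    };

    size_t countLiveCells(MarkedBlock* block)
    {
        LiveCellCounter counter;
        block->forEachLiveCell(counter);
        return counter.returnValue();
    }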