X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/6fe7ccc865dc7d7541b93c5bcaf6368d2c98a174..93a3786624b2768d89bfa27e46598dc64e2fb70a:/heap/MarkedBlock.h?ds=sidebyside

diff --git a/heap/MarkedBlock.h b/heap/MarkedBlock.h
index 429b7c0..fcc3016 100644
--- a/heap/MarkedBlock.h
+++ b/heap/MarkedBlock.h
@@ -22,9 +22,10 @@
 #ifndef MarkedBlock_h
 #define MarkedBlock_h
 
-#include "CardSet.h"
+#include "BlockAllocator.h"
 #include "HeapBlock.h"
 
+#include "WeakSet.h"
 #include <wtf/Bitmap.h>
 #include <wtf/DoublyLinkedList.h>
 #include <wtf/HashFunctions.h>
@@ -38,7 +39,7 @@
 
 #if HEAP_LOG_BLOCK_STATE_TRANSITIONS
 #define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {                     \
-        dataLog(                                                        \
+        dataLogF(                                                       \
            "%s:%d %s: block %s = %p, %d\n",                             \
            __FILE__, __LINE__, __FUNCTION__,                            \
            #block, (block), (block)->m_state);                          \
@@ -51,6 +52,7 @@ namespace JSC {
 
     class Heap;
     class JSCell;
+    class MarkedAllocator;
 
     typedef uintptr_t Bits;
 
@@ -66,22 +68,14 @@ namespace JSC {
     // size is equal to the difference between the cell size and the object
     // size.
 
-    class MarkedBlock : public HeapBlock {
-        friend class WTF::DoublyLinkedListNode<MarkedBlock>;
+    class MarkedBlock : public HeapBlock<MarkedBlock> {
     public:
-        // Ensure natural alignment for native types whilst recognizing that the smallest
-        // object the heap will commonly allocate is four words.
-        static const size_t atomSize = 4 * sizeof(void*);
-        static const size_t atomShift = 5;
+        static const size_t atomSize = 8; // bytes
         static const size_t blockSize = 64 * KB;
         static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
 
-        static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
+        static const size_t atomsPerBlock = blockSize / atomSize;
         static const size_t atomMask = atomsPerBlock - 1;
-        static const int cardShift = 8; // This is log2 of bytes per card.
-        static const size_t bytesPerCard = 1 << cardShift;
-        static const int cardCount = blockSize / bytesPerCard;
-        static const int cardMask = cardCount - 1;
 
         struct FreeCell {
             FreeCell* next;
@@ -100,33 +94,52 @@ namespace JSC {
             void returnValue() { }
         };
 
-        static MarkedBlock* create(Heap*, size_t cellSize, bool cellsNeedDestruction);
-        static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize, bool cellsNeedDestruction);
-        static void destroy(MarkedBlock*);
+        class CountFunctor {
+        public:
+            typedef size_t ReturnType;
+
+            CountFunctor() : m_count(0) { }
+            void count(size_t count) { m_count += count; }
+            ReturnType returnValue() { return m_count; }
+
+        private:
+            ReturnType m_count;
+        };
+
+        enum DestructorType { None, ImmortalStructure, Normal };
+        static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);
 
         static bool isAtomAligned(const void*);
         static MarkedBlock* blockFor(const void*);
         static size_t firstAtom();
 
+        void lastChanceToFinalize();
+
+        MarkedAllocator* allocator() const;
         Heap* heap() const;
+        VM* vm() const;
+        WeakSet& weakSet();
 
-        void* allocate();
-
         enum SweepMode { SweepOnly, SweepToFreeList };
         FreeList sweep(SweepMode = SweepOnly);
 
+        void shrink();
+
+        void visitWeakSet(HeapRootVisitor&);
+        void reapWeakSet();
+
         // While allocating from a free list, MarkedBlock temporarily has bogus
         // cell liveness data. To restore accurate cell liveness data, call one
         // of these functions:
         void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
-        void zapFreeList(const FreeList&); // Call this to undo the free list.
+        void canonicalizeCellLivenessData(const FreeList&);
 
         void clearMarks();
         size_t markCount();
-        bool markCountIsZero(); // Faster than markCount().
+        bool isEmpty();
 
         size_t cellSize();
-        bool cellsNeedDestruction();
+        DestructorType destructorType();
 
         size_t size();
         size_t capacity();
@@ -136,55 +149,32 @@ namespace JSC {
         bool isLive(const JSCell*);
         bool isLiveCell(const void*);
         void setMarked(const void*);
-
-#if ENABLE(GGC)
-        void setDirtyObject(const void* atom)
-        {
-            ASSERT(MarkedBlock::blockFor(atom) == this);
-            m_cards.markCardForAtom(atom);
-        }
+        void clearMarked(const void*);
 
-        uint8_t* addressOfCardFor(const void* atom)
-        {
-            ASSERT(MarkedBlock::blockFor(atom) == this);
-            return &m_cards.cardForAtom(atom);
-        }
-
-        static inline size_t offsetOfCards()
-        {
-            return OBJECT_OFFSETOF(MarkedBlock, m_cards);
-        }
+        bool isNewlyAllocated(const void*);
+        void setNewlyAllocated(const void*);
+        void clearNewlyAllocated(const void*);
 
-        static inline size_t offsetOfMarks()
-        {
-            return OBJECT_OFFSETOF(MarkedBlock, m_marks);
-        }
-
-        typedef Vector<JSCell*, 32> DirtyCellVector;
-        inline void gatherDirtyCells(DirtyCellVector&);
-        template <int _cellSize> inline void gatherDirtyCellsWithSize(DirtyCellVector&);
-#endif
+        bool needsSweeping();
 
         template <typename Functor> void forEachCell(Functor&);
+        template <typename Functor> void forEachLiveCell(Functor&);
+        template <typename Functor> void forEachDeadCell(Functor&);
 
     private:
         static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.
 
-        enum BlockState { New, FreeListed, Allocated, Marked, Zapped };
-        template<bool destructorCallNeeded> FreeList sweepHelper(SweepMode = SweepOnly);
+        enum BlockState { New, FreeListed, Allocated, Marked };
+        template<DestructorType dtorType> FreeList sweepHelper(SweepMode = SweepOnly);
 
         typedef char Atom[atomSize];
 
-        MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction);
+        MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
         Atom* atoms();
         size_t atomNumber(const void*);
         void callDestructor(JSCell*);
-        template<BlockState, SweepMode, bool destructorCallNeeded> FreeList specializedSweep();
+        template<BlockState, SweepMode, DestructorType dtorType> FreeList specializedSweep();
 
-#if ENABLE(GGC)
-        CardSet<bytesPerCard, blockSize> m_cards;
-#endif
-
         size_t m_atomsPerCell;
         size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
 #if ENABLE(PARALLEL_GC)
@@ -192,9 +182,12 @@
 #else
         WTF::Bitmap<atomsPerBlock> m_marks;
 #endif
-        bool m_cellsNeedDestruction;
+        OwnPtr<WTF::Bitmap<atomsPerBlock> > m_newlyAllocated;
+
+        DestructorType m_destructorType;
+        MarkedAllocator* m_allocator;
         BlockState m_state;
-        Heap* m_heap;
+        WeakSet m_weakSet;
     };
 
     inline MarkedBlock::FreeList::FreeList()
@@ -229,9 +222,47 @@ namespace JSC {
         return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
     }
 
+    inline void MarkedBlock::lastChanceToFinalize()
+    {
+        m_weakSet.lastChanceToFinalize();
+
+        clearMarks();
+        sweep();
+    }
+
+    inline MarkedAllocator* MarkedBlock::allocator() const
+    {
+        return m_allocator;
+    }
+
     inline Heap* MarkedBlock::heap() const
     {
-        return m_heap;
+        return m_weakSet.heap();
+    }
+
+    inline VM* MarkedBlock::vm() const
+    {
+        return m_weakSet.vm();
+    }
+
+    inline WeakSet& MarkedBlock::weakSet()
+    {
+        return m_weakSet;
+    }
+
+    inline void MarkedBlock::shrink()
+    {
+        m_weakSet.shrink();
+    }
+
+    inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
+    {
+        m_weakSet.visit(heapRootVisitor);
+    }
+
+    inline void MarkedBlock::reapWeakSet()
+    {
+        m_weakSet.reap();
     }
 
     inline void MarkedBlock::didConsumeFreeList()
@@ -248,6 +279,7 @@ namespace JSC {
 
         ASSERT(m_state != New && m_state != FreeListed);
         m_marks.clearAll();
+        m_newlyAllocated.clear();
 
         // This will become true at the end of the mark phase. We set it now to
         // avoid an extra pass to do so later.
@@ -259,9 +291,9 @@
         return m_marks.count();
     }
 
-    inline bool MarkedBlock::markCountIsZero()
+    inline bool MarkedBlock::isEmpty()
     {
-        return m_marks.isEmpty();
+        return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
     }
 
     inline size_t MarkedBlock::cellSize()
@@ -269,9 +301,9 @@
         return m_atomsPerCell * atomSize;
     }
 
-    inline bool MarkedBlock::cellsNeedDestruction()
+    inline MarkedBlock::DestructorType MarkedBlock::destructorType()
     {
-        return m_cellsNeedDestruction;
+        return m_destructorType;
     }
 
     inline size_t MarkedBlock::size()
     {
@@ -281,7 +313,7 @@
 
     inline size_t MarkedBlock::capacity()
     {
-        return m_allocation.size();
+        return region()->blockSize();
     }
 
     inline size_t MarkedBlock::atomNumber(const void* p)
@@ -304,31 +336,43 @@
         m_marks.set(atomNumber(p));
     }
 
+    inline void MarkedBlock::clearMarked(const void* p)
+    {
+        ASSERT(m_marks.get(atomNumber(p)));
+        m_marks.clear(atomNumber(p));
+    }
+
+    inline bool MarkedBlock::isNewlyAllocated(const void* p)
+    {
+        return m_newlyAllocated->get(atomNumber(p));
+    }
+
+    inline void MarkedBlock::setNewlyAllocated(const void* p)
+    {
+        m_newlyAllocated->set(atomNumber(p));
+    }
+
+    inline void MarkedBlock::clearNewlyAllocated(const void* p)
+    {
+        m_newlyAllocated->clear(atomNumber(p));
+    }
+
     inline bool MarkedBlock::isLive(const JSCell* cell)
     {
         switch (m_state) {
         case Allocated:
             return true;
-        case Zapped:
-            if (isZapped(cell)) {
-                // Object dead in previous collection, not allocated since previous collection: mark bit should not be set.
-                ASSERT(!m_marks.get(atomNumber(cell)));
-                return false;
-            }
-
-            // Newly allocated objects: mark bit not set.
-            // Objects that survived prior collection: mark bit set.
-            return true;
+
         case Marked:
-            return m_marks.get(atomNumber(cell));
+            return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
 
         case New:
         case FreeListed:
-            ASSERT_NOT_REACHED();
+            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }
 
-        ASSERT_NOT_REACHED();
+        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
 
@@ -351,93 +395,36 @@
     {
         for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
             JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
-            if (!isLive(cell))
-                continue;
-
             functor(cell);
         }
     }
 
-#if ENABLE(GGC)
-template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells)
-{
-    if (m_cards.testAndClear(0)) {
-        char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]);
-        const char* end = reinterpret_cast<char*>(this) + bytesPerCard;
-        while (ptr < end) {
-            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
-            if (isMarked(cell))
-                dirtyCells.append(cell);
-            ptr += _cellSize;
-        }
-    }
-
-    const size_t cellOffset = firstAtom() * atomSize % _cellSize;
-    for (size_t i = 1; i < m_cards.cardCount; i++) {
-        if (!m_cards.testAndClear(i))
-            continue;
-        char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset;
-        char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard;
-
-        while (ptr < end) {
-            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
-            if (isMarked(cell))
-                dirtyCells.append(cell);
-            ptr += _cellSize;
+    template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
+    {
+        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+            if (!isLive(cell))
+                continue;
+
+            functor(cell);
         }
     }
-}
-
-void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells)
-{
-    COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch);
-    ASSERT(m_state != New && m_state != FreeListed);
-
-    // This is an optimisation to avoid having to walk the set of marked
-    // blocks twice during GC.
-    m_state = Marked;
-
-    if (markCountIsZero())
-        return;
-
-    size_t cellSize = this->cellSize();
-    if (cellSize == 32) {
-        gatherDirtyCellsWithSize<32>(dirtyCells);
-        return;
-    }
-    if (cellSize == 64) {
-        gatherDirtyCellsWithSize<64>(dirtyCells);
-        return;
-    }
+    template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
+    {
+        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+            if (isLive(cell))
+                continue;
 
-    const size_t firstCellOffset = firstAtom() * atomSize % cellSize;
-
-    if (m_cards.testAndClear(0)) {
-        char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize;
-        char* end = reinterpret_cast<char*>(this) + bytesPerCard;
-        while (ptr < end) {
-            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
-            if (isMarked(cell))
-                dirtyCells.append(cell);
-            ptr += cellSize;
+            functor(cell);
         }
     }
-    for (size_t i = 1; i < m_cards.cardCount; i++) {
-        if (!m_cards.testAndClear(i))
-            continue;
-        char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize);
-        char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize);
-
-        while (ptr < end) {
-            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
-            if (isMarked(cell))
-                dirtyCells.append(cell);
-            ptr += cellSize;
-        }
+
+    inline bool MarkedBlock::needsSweeping()
+    {
+        return m_state == Marked;
     }
-}
-#endif
 
 } // namespace JSC
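
Editor's note, outside the patch itself: the new forEachLiveCell/forEachDeadCell templates iterate a block's cells with a functor written in the style of the CountFunctor this diff adds. A minimal, hypothetical sketch of a caller follows; the class name LiveCellCounter and the block pointer are illustrative assumptions, not part of this diff.

    // Hypothetical functor modeled on MarkedBlock::CountFunctor.
    class LiveCellCounter : public JSC::MarkedBlock::CountFunctor {
    public:
        void operator()(JSC::JSCell*) { count(1); } // invoked once per live cell
    };

    // Usage sketch, assuming 'block' is a JSC::MarkedBlock* owned by a MarkedAllocator:
    LiveCellCounter counter;
    block->forEachLiveCell(counter);
    size_t liveCells = counter.returnValue();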