X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/14957cd040308e3eeec43d26bae5d76da13fcd85..a253471d7f8e4d91bf6ebabab00155c3b387d3d0:/heap/MarkedBlock.h

diff --git a/heap/MarkedBlock.h b/heap/MarkedBlock.h
index c567502..429b7c0 100644
--- a/heap/MarkedBlock.h
+++ b/heap/MarkedBlock.h
@@ -22,25 +22,86 @@
 #ifndef MarkedBlock_h
 #define MarkedBlock_h
 
+#include "CardSet.h"
+#include "HeapBlock.h"
+
 #include <wtf/Bitmap.h>
+#include <wtf/DataLog.h>
+#include <wtf/DoublyLinkedList.h>
+#include <wtf/HashFunctions.h>
 #include <wtf/PageAllocationAligned.h>
 #include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+
+// Set to log state transitions of blocks.
+#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0
+
+#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
+#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {                     \
+        dataLog(                                                        \
+            "%s:%d %s: block %s = %p, %d\n",                            \
+            __FILE__, __LINE__, __FUNCTION__,                           \
+            #block, (block), (block)->m_state);                         \
+    } while (false)
+#else
+#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
+#endif
 
 namespace JSC {
-    
+
     class Heap;
     class JSCell;
-    class JSGlobalData;
 
     typedef uintptr_t Bits;
 
-    static const size_t KB = 1024;
-    
-    class MarkedBlock {
+    static const size_t MB = 1024 * 1024;
+
+    bool isZapped(const JSCell*);
+
+    // A marked block is a page-aligned container for heap-allocated objects.
+    // Objects are allocated within cells of the marked block. For a given
+    // marked block, all cells have the same size. Objects smaller than the
+    // cell size may be allocated in the marked block, in which case the
+    // allocation suffers from internal fragmentation: wasted space whose
+    // size is equal to the difference between the cell size and the object
+    // size.
+
+    class MarkedBlock : public HeapBlock {
+        friend class WTF::DoublyLinkedListNode<MarkedBlock>;
     public:
-        static const size_t atomSize = sizeof(double); // Ensures natural alignment for all built-in types.
+        // Ensure natural alignment for native types whilst recognizing that the smallest
+        // object the heap will commonly allocate is four words.
+        static const size_t atomSize = 4 * sizeof(void*);
+        static const size_t atomShift = 5;
+        static const size_t blockSize = 64 * KB;
+        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
 
-        static MarkedBlock* create(JSGlobalData*, size_t cellSize);
+        static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
+        static const size_t atomMask = atomsPerBlock - 1;
+        static const int cardShift = 8; // This is log2 of bytes per card.
+        static const size_t bytesPerCard = 1 << cardShift;
+        static const int cardCount = blockSize / bytesPerCard;
+        static const int cardMask = cardCount - 1;
+
+        struct FreeCell {
+            FreeCell* next;
+        };
+
+        struct FreeList {
+            FreeCell* head;
+            size_t bytes;
+
+            FreeList();
+            FreeList(FreeCell*, size_t);
+        };
+
+        struct VoidFunctor {
+            typedef void ReturnType;
+            void returnValue() { }
+        };
+
+        static MarkedBlock* create(Heap*, size_t cellSize, bool cellsNeedDestruction);
+        static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize, bool cellsNeedDestruction);
         static void destroy(MarkedBlock*);
 
         static bool isAtomAligned(const void*);
@@ -48,57 +109,106 @@ namespace JSC {
         static size_t firstAtom();
 
         Heap* heap() const;
-
-        void setPrev(MarkedBlock*);
-        void setNext(MarkedBlock*);
-        MarkedBlock* prev() const;
-        MarkedBlock* next() const;
 
         void* allocate();
-        void reset();
-        void sweep();
-
-        bool isEmpty();
+
+        enum SweepMode { SweepOnly, SweepToFreeList };
+        FreeList sweep(SweepMode = SweepOnly);
+
+        // While allocating from a free list, MarkedBlock temporarily has bogus
+        // cell liveness data. To restore accurate cell liveness data, call one
+        // of these functions:
+        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
+        void zapFreeList(const FreeList&); // Call this to undo the free list.
 
         void clearMarks();
         size_t markCount();
+        bool markCountIsZero(); // Faster than markCount().
 
         size_t cellSize();
+        bool cellsNeedDestruction();
 
         size_t size();
         size_t capacity();
 
-        bool contains(const void*);
-        size_t atomNumber(const void*);
         bool isMarked(const void*);
         bool testAndSetMarked(const void*);
+        bool isLive(const JSCell*);
+        bool isLiveCell(const void*);
         void setMarked(const void*);
 
-        template<typename Functor> void forEach(Functor&);
+#if ENABLE(GGC)
+        void setDirtyObject(const void* atom)
+        {
+            ASSERT(MarkedBlock::blockFor(atom) == this);
+            m_cards.markCardForAtom(atom);
+        }
+
+        uint8_t* addressOfCardFor(const void* atom)
+        {
+            ASSERT(MarkedBlock::blockFor(atom) == this);
+            return &m_cards.cardForAtom(atom);
+        }
+
+        static inline size_t offsetOfCards()
+        {
+            return OBJECT_OFFSETOF(MarkedBlock, m_cards);
+        }
+
+        static inline size_t offsetOfMarks()
+        {
+            return OBJECT_OFFSETOF(MarkedBlock, m_marks);
+        }
+
+        typedef Vector<JSCell*, 32> DirtyCellVector;
+        inline void gatherDirtyCells(DirtyCellVector&);
+        template <int _cellSize> inline void gatherDirtyCellsWithSize(DirtyCellVector&);
+#endif
+
+        template<typename Functor> void forEachCell(Functor&);
 
     private:
-        static const size_t blockSize = 16 * KB;
-        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
-
         static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.
-        static const size_t atomMask = ~(atomSize - 1); // atomSize must be a power of two.
-
-        static const size_t atomsPerBlock = blockSize / atomSize;
 
+        enum BlockState { New, FreeListed, Allocated, Marked, Zapped };
+        template<bool destructorCallNeeded> FreeList sweepHelper(SweepMode = SweepOnly);
+
         typedef char Atom[atomSize];
 
-        MarkedBlock(const PageAllocationAligned&, JSGlobalData*, size_t cellSize);
+        MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction);
         Atom* atoms();
+        size_t atomNumber(const void*);
+        void callDestructor(JSCell*);
+        template<BlockState, SweepMode, bool destructorCallNeeded> FreeList specializedSweep();
+
+#if ENABLE(GGC)
+        CardSet<bytesPerCard, blockSize> m_cards;
+#endif
 
-        size_t m_nextAtom;
-        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
         size_t m_atomsPerCell;
-        WTF::Bitmap<atomsPerBlock> m_marks;
-        PageAllocationAligned m_allocation;
+        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
+#if ENABLE(PARALLEL_GC)
+        WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
+#else
+        WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
+#endif
+        bool m_cellsNeedDestruction;
+        BlockState m_state;
         Heap* m_heap;
-        MarkedBlock* m_prev;
-        MarkedBlock* m_next;
     };
 
+    inline MarkedBlock::FreeList::FreeList()
+        : head(0)
+        , bytes(0)
+    {
+    }
+
+    inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
+        : head(head)
+        , bytes(bytes)
+    {
+    }
+
     inline size_t MarkedBlock::firstAtom()
     {
         return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
@@ -111,12 +221,12 @@ namespace JSC {
 
     inline bool MarkedBlock::isAtomAligned(const void* p)
     {
-        return !((intptr_t)(p) & ~atomMask);
+        return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
     }
 
     inline MarkedBlock* MarkedBlock::blockFor(const void* p)
     {
-        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<uintptr_t>(p) & blockMask);
+        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
     }
 
     inline Heap* MarkedBlock::heap() const
@@ -124,49 +234,44 @@ namespace JSC {
         return m_heap;
     }
 
-    inline void MarkedBlock::setPrev(MarkedBlock* prev)
+    inline void MarkedBlock::didConsumeFreeList()
     {
-        m_prev = prev;
-    }
+        HEAP_LOG_BLOCK_STATE_TRANSITION(this);
 
-    inline void MarkedBlock::setNext(MarkedBlock* next)
-    {
-        m_next = next;
+        ASSERT(m_state == FreeListed);
+        m_state = Allocated;
     }
 
-    inline MarkedBlock* MarkedBlock::prev() const
+    inline void MarkedBlock::clearMarks()
     {
-        return m_prev;
-    }
+        HEAP_LOG_BLOCK_STATE_TRANSITION(this);
 
-    inline MarkedBlock* MarkedBlock::next() const
-    {
-        return m_next;
+        ASSERT(m_state != New && m_state != FreeListed);
+        m_marks.clearAll();
+
+        // This will become true at the end of the mark phase. We set it now to
+        // avoid an extra pass to do so later.
+        m_state = Marked;
     }
 
-    inline void MarkedBlock::reset()
+    inline size_t MarkedBlock::markCount()
    {
-        m_nextAtom = firstAtom();
+        return m_marks.count();
    }
 
-    inline bool MarkedBlock::isEmpty()
+    inline bool MarkedBlock::markCountIsZero()
     {
         return m_marks.isEmpty();
     }
 
-    inline void MarkedBlock::clearMarks()
-    {
-        m_marks.clearAll();
-    }
-
-    inline size_t MarkedBlock::markCount()
+    inline size_t MarkedBlock::cellSize()
     {
-        return m_marks.count();
+        return m_atomsPerCell * atomSize;
     }
 
-    inline size_t MarkedBlock::cellSize()
+    inline bool MarkedBlock::cellsNeedDestruction()
     {
-        return m_atomsPerCell * atomSize;
+        return m_cellsNeedDestruction;
     }
 
     inline size_t MarkedBlock::size()
@@ -179,19 +284,9 @@ namespace JSC {
         return m_allocation.size();
     }
 
-    inline bool MarkedBlock::contains(const void* p)
-    {
-        ASSERT(p && isAtomAligned(p) && atomNumber(p) < atomsPerBlock);
-
-        // Even though we physically contain p, we only logically contain p if p
-        // points to a live cell. (Claiming to contain a dead cell would trick the
-        // conservative garbage collector into resurrecting the cell in a zombie state.)
-        return isMarked(p);
-    }
-
     inline size_t MarkedBlock::atomNumber(const void* p)
     {
-        return (reinterpret_cast<uintptr_t>(p) - reinterpret_cast<uintptr_t>(this)) / atomSize;
+        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
     }
 
     inline bool MarkedBlock::isMarked(const void* p)
@@ -201,7 +296,7 @@ namespace JSC {
 
     inline bool MarkedBlock::testAndSetMarked(const void* p)
     {
-        return m_marks.testAndSet(atomNumber(p));
+        return m_marks.concurrentTestAndSet(atomNumber(p));
     }
 
     inline void MarkedBlock::setMarked(const void* p)
@@ -209,15 +304,159 @@ namespace JSC {
         m_marks.set(atomNumber(p));
     }
 
-    template<typename Functor> inline void MarkedBlock::forEach(Functor& functor)
+    inline bool MarkedBlock::isLive(const JSCell* cell)
+    {
+        switch (m_state) {
+        case Allocated:
+            return true;
+        case Zapped:
+            if (isZapped(cell)) {
+                // Object dead in previous collection, not allocated since previous collection: mark bit should not be set.
+                ASSERT(!m_marks.get(atomNumber(cell)));
+                return false;
+            }
+
+            // Newly allocated objects: mark bit not set.
+            // Objects that survived prior collection: mark bit set.
+            return true;
+        case Marked:
+            return m_marks.get(atomNumber(cell));
+
+        case New:
+        case FreeListed:
+            ASSERT_NOT_REACHED();
+            return false;
+        }
+
+        ASSERT_NOT_REACHED();
+        return false;
+    }
+
+    inline bool MarkedBlock::isLiveCell(const void* p)
+    {
+        ASSERT(MarkedBlock::isAtomAligned(p));
+        size_t atomNumber = this->atomNumber(p);
+        size_t firstAtom = this->firstAtom();
+        if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
+            return false;
+        if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
+            return false;
+        if (atomNumber >= m_endAtom) // Filters pointers into invalid cells out of the range.
+            return false;
+
+        return isLive(static_cast<const JSCell*>(p));
+    }
+
+    template<typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
     {
         for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
-            if (!m_marks.get(i))
+            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+            if (!isLive(cell))
                 continue;
-            functor(reinterpret_cast<JSCell*>(&atoms()[i]));
+
+            functor(cell);
         }
     }
 
+#if ENABLE(GGC)
+template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells)
+{
+    if (m_cards.testAndClear(0)) {
+        char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]);
+        const char* end = reinterpret_cast<char*>(this) + bytesPerCard;
+        while (ptr < end) {
+            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
+            if (isMarked(cell))
+                dirtyCells.append(cell);
+            ptr += _cellSize;
+        }
+    }
+
+    const size_t cellOffset = firstAtom() * atomSize % _cellSize;
+    for (size_t i = 1; i < m_cards.cardCount; i++) {
+        if (!m_cards.testAndClear(i))
+            continue;
+        char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset;
+        char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard;
+
+        while (ptr < end) {
+            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
+            if (isMarked(cell))
+                dirtyCells.append(cell);
+            ptr += _cellSize;
+        }
+    }
+}
+
+void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells)
+{
+    COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch);
+
+    ASSERT(m_state != New && m_state != FreeListed);
+
+    // This is an optimisation to avoid having to walk the set of marked
+    // blocks twice during GC.
+    m_state = Marked;
+
+    if (markCountIsZero())
+        return;
+
+    size_t cellSize = this->cellSize();
+    if (cellSize == 32) {
+        gatherDirtyCellsWithSize<32>(dirtyCells);
+        return;
+    }
+    if (cellSize == 64) {
+        gatherDirtyCellsWithSize<64>(dirtyCells);
+        return;
+    }
+
+    const size_t firstCellOffset = firstAtom() * atomSize % cellSize;
+
+    if (m_cards.testAndClear(0)) {
+        char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize;
+        char* end = reinterpret_cast<char*>(this) + bytesPerCard;
+        while (ptr < end) {
+            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
+            if (isMarked(cell))
+                dirtyCells.append(cell);
+            ptr += cellSize;
+        }
+    }
+    for (size_t i = 1; i < m_cards.cardCount; i++) {
+        if (!m_cards.testAndClear(i))
+            continue;
+        char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize);
+        char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize);
+
+        while (ptr < end) {
+            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
+            if (isMarked(cell))
+                dirtyCells.append(cell);
+            ptr += cellSize;
+        }
+    }
+}
+#endif
 
 } // namespace JSC
 
-#endif // MarkedSpace_h
+namespace WTF {
+
+    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
+        static unsigned hash(JSC::MarkedBlock* const& key)
+        {
+            // Aligned VM regions tend to be monotonically increasing integers,
+            // which is a great hash function, but we have to remove the low bits,
+            // since they're always zero, which is a terrible hash function!
+            return reinterpret_cast<uintptr_t>(key) / JSC::MarkedBlock::blockSize;
+        }
+    };
+
+    template<> struct DefaultHash<JSC::MarkedBlock*> {
+        typedef MarkedBlockHash Hash;
+    };
+
+} // namespace WTF
+
+#endif // MarkedBlock_h
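
The new constants make every address computation in this header a mask or a shift: blocks are 64 KB and 64 KB-aligned, so blockFor() can recover a block from any interior pointer by clearing the low bits, and atomNumber() is just the byte offset within the block divided by the atom size. Below is a minimal standalone sketch of that arithmetic, using illustrative constants and free functions rather than the JSC types; the 32-byte atom assumes a 64-bit target.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins for the MarkedBlock constants (64-bit target assumed).
    static const size_t kAtomSize = 32;                               // 4 * sizeof(void*) on 64-bit
    static const size_t kBlockSize = 64 * 1024;                       // blocks are 64 KB and 64 KB-aligned
    static const uintptr_t kBlockMask = ~(uintptr_t)(kBlockSize - 1); // clears the offset-within-block bits
    static const uintptr_t kAtomAlignmentMask = kAtomSize - 1;

    // Recover the base address of the block containing p (what blockFor() computes).
    static uintptr_t blockBaseFor(const void* p)
    {
        return (uintptr_t)p & kBlockMask;
    }

    // Index of the atom containing p, counted from the block base (what atomNumber() computes).
    static size_t atomNumberFor(const void* p)
    {
        return ((uintptr_t)p - blockBaseFor(p)) / kAtomSize;
    }

    static bool isAtomAligned(const void* p)
    {
        return !((uintptr_t)p & kAtomAlignmentMask);
    }

    int main()
    {
        // Pretend a block sits at 0x10000 and a cell starts three atoms into it.
        uintptr_t base = 0x10000;
        const void* cell = (const void*)(base + 3 * kAtomSize);

        assert(blockBaseFor(cell) == base);
        assert(atomNumberFor(cell) == 3);
        assert(isAtomAligned(cell));
        std::printf("atom %zu of block 0x%llx\n", atomNumberFor(cell), (unsigned long long)blockBaseFor(cell));
        return 0;
    }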
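
sweep(SweepToFreeList) threads a block's dead cells into the intrusive FreeCell/FreeList structure, the allocator pops cells off that list, and didConsumeFreeList() or zapFreeList() then restores trustworthy liveness data, as the comment in the class explains. The sketch below imitates only the intrusive free-list part, with hypothetical standalone types (fixed 64-byte cells over a malloc'd buffer); it is not the JSC implementation.

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-ins for MarkedBlock::FreeCell / MarkedBlock::FreeList:
    // dead cells are threaded through their own storage, so the list costs nothing extra.
    struct FreeCell {
        FreeCell* next;
    };

    struct FreeList {
        FreeCell* head;
        size_t bytes; // total reusable bytes, mirroring FreeList::bytes
    };

    static const size_t kCellSize = 64;

    // "Sweep" a toy block in which every cell is dead: each cell joins the list.
    static FreeList sweepToFreeList(char* block, size_t cellCount)
    {
        FreeList list = { 0, 0 };
        for (size_t i = 0; i < cellCount; ++i) {
            FreeCell* cell = reinterpret_cast<FreeCell*>(block + i * kCellSize);
            cell->next = list.head;
            list.head = cell;
            list.bytes += kCellSize;
        }
        return list;
    }

    // The allocator pops cells until the list is exhausted; at that point the real
    // MarkedBlock is told via didConsumeFreeList() that its mark bits are accurate again.
    static void* allocateFromFreeList(FreeList& list)
    {
        if (!list.head)
            return 0;
        FreeCell* cell = list.head;
        list.head = cell->next;
        list.bytes -= kCellSize;
        return cell;
    }

    int main()
    {
        char* block = static_cast<char*>(std::malloc(16 * kCellSize));
        FreeList list = sweepToFreeList(block, 16);
        size_t allocations = 0;
        while (allocateFromFreeList(list))
            ++allocations;
        std::printf("handed out %zu cells, %zu bytes left on the list\n", allocations, list.bytes);
        std::free(block);
        return 0;
    }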
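
Under ENABLE(GGC) the block keeps a card table for the write barrier: cardShift is 8, so each card covers 256 bytes, a 64 KB block has 256 cards, and setDirtyObject() amounts to indexing that table with the object's offset within the block shifted right by cardShift. A small sketch of the index arithmetic, with a plain byte array standing in for the CardSet class:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Constants mirrored from the header: 256-byte cards over a 64 KB block.
    static const int kCardShift = 8;
    static const size_t kBytesPerCard = (size_t)1 << kCardShift; // 256
    static const size_t kBlockSize = 64 * 1024;
    static const size_t kCardCount = kBlockSize / kBytesPerCard; // 256 cards per block

    // Card index of a pointer, given the base address of its block.
    static size_t cardFor(uintptr_t blockBase, const void* p)
    {
        return ((uintptr_t)p - blockBase) >> kCardShift;
    }

    int main()
    {
        uint8_t cards[kCardCount] = { 0 };  // byte-per-card table, standing in for CardSet

        uintptr_t base = 0x40000;           // pretend 64 KB-aligned block base
        const void* object = (const void*)(base + 1000);

        cards[cardFor(base, object)] = 1;   // roughly what setDirtyObject() records
        assert(cardFor(base, object) == 1000 / kBytesPerCard); // offset 1000 lands in card 3
        (void)cards;
        return 0;
    }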
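
The MarkedBlockHash added at the bottom works because block addresses are always blockSize-aligned: their low 16 bits are zero, so dividing by blockSize discards the useless bits and maps consecutively mapped blocks to consecutive integers. A tiny illustration of the same idea with made-up 64-bit addresses:

    #include <cstdint>
    #include <cstdio>

    static const uint64_t kBlockSize = 64 * 1024;

    // Same idea as MarkedBlockHash::hash(): drop the always-zero low bits.
    static unsigned blockHash(uint64_t blockAddress)
    {
        return (unsigned)(blockAddress / kBlockSize);
    }

    int main()
    {
        // Two made-up, adjacent 64 KB-aligned block addresses.
        uint64_t a = 0x7f0000010000ull;
        uint64_t b = 0x7f0000020000ull;
        std::printf("0x%x 0x%x\n", blockHash(a), blockHash(b)); // 0x7f000001, 0x7f000002
        return 0;
    }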