X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/14957cd040308e3eeec43d26bae5d76da13fcd85..8b637bb680022adfddad653280734877951535a9:/heap/MarkedBlock.cpp?ds=inline

diff --git a/heap/MarkedBlock.cpp b/heap/MarkedBlock.cpp
index f1f630c..0df2e1f 100644
--- a/heap/MarkedBlock.cpp
+++ b/heap/MarkedBlock.cpp
@@ -26,64 +26,177 @@
 #include "config.h"
 #include "MarkedBlock.h"
 
+#include "IncrementalSweeper.h"
 #include "JSCell.h"
-#include "JSObject.h"
-#include "JSZombie.h"
-#include "ScopeChain.h"
+#include "JSDestructibleObject.h"
+#include "Operations.h"
 
 namespace JSC {
 
-MarkedBlock* MarkedBlock::create(JSGlobalData* globalData, size_t cellSize)
+MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
 {
-    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
-    if (!static_cast<bool>(allocation))
-        CRASH();
-    return new (allocation.base()) MarkedBlock(allocation, globalData, cellSize);
+    ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
+    Region* region = block->region();
+    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
 }
 
-void MarkedBlock::destroy(MarkedBlock* block)
+MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
+    : HeapBlock<MarkedBlock>(region)
+    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
+    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
+    , m_destructorType(destructorType)
+    , m_allocator(allocator)
+    , m_state(New) // All cells start out unmarked.
+    , m_weakSet(allocator->heap()->vm())
 {
-    for (size_t i = block->firstAtom(); i < block->m_endAtom; i += block->m_atomsPerCell)
-        reinterpret_cast<JSCell*>(&block->atoms()[i])->~JSCell();
-    block->m_allocation.deallocate();
+    ASSERT(allocator);
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
 }
 
-MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, JSGlobalData* globalData, size_t cellSize)
-    : m_nextAtom(firstAtom())
-    , m_allocation(allocation)
-    , m_heap(&globalData->heap)
-    , m_prev(0)
-    , m_next(0)
+inline void MarkedBlock::callDestructor(JSCell* cell)
 {
-    m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
-    m_endAtom = atomsPerBlock - m_atomsPerCell + 1;
+    // A previous eager sweep may already have run cell's destructor.
+    if (cell->isZapped())
+        return;
 
-    Structure* dummyMarkableCellStructure = globalData->dummyMarkableCellStructure.get();
-    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
-        new (&atoms()[i]) JSCell(*globalData, dummyMarkableCellStructure, JSCell::CreatingEarlyCell);
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
+#endif
+
+    cell->methodTableForDestruction()->destroy(cell);
+    cell->zap();
 }
 
-void MarkedBlock::sweep()
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
+MarkedBlock::FreeList MarkedBlock::specializedSweep()
 {
-    Structure* dummyMarkableCellStructure = m_heap->globalData()->dummyMarkableCellStructure.get();
+    ASSERT(blockState != Allocated && blockState != FreeListed);
+    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));
 
+    // This produces a free list that is ordered in reverse through the block.
+    // This is fine, since the allocation code makes no assumptions about the
+    // order of the free list.
+    FreeCell* head = 0;
+    size_t count = 0;
     for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
-        if (m_marks.get(i))
+        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
             continue;
 
-        JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
-#if ENABLE(JSC_ZOMBIES)
-        if (cell->structure() && cell->structure() != dummyMarkableCellStructure && !cell->isZombie()) {
-            const ClassInfo* info = cell->classInfo();
-            cell->~JSCell();
-            new (cell) JSZombie(*m_heap->globalData(), info, m_heap->globalData()->zombieStructure.get());
-            m_marks.set(i);
+        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+
+        if (dtorType != MarkedBlock::None && blockState != New)
+            callDestructor(cell);
+
+        if (sweepMode == SweepToFreeList) {
+            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
+            freeCell->next = head;
+            head = freeCell;
+            ++count;
         }
-#else
-        cell->~JSCell();
-        new (cell) JSCell(*m_heap->globalData(), dummyMarkableCellStructure);
-#endif
     }
+
+    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
+    // otherwise we would lose information on what's currently alive.
+    if (sweepMode == SweepToFreeList && m_newlyAllocated)
+        m_newlyAllocated.clear();
+
+    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
+    return FreeList(head, count * cellSize());
+}
+
+MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
+{
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+    m_weakSet.sweep();
+
+    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
+        return FreeList();
+
+    if (m_destructorType == MarkedBlock::ImmortalStructure)
+        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
+    if (m_destructorType == MarkedBlock::Normal)
+        return sweepHelper<MarkedBlock::Normal>(sweepMode);
+    return sweepHelper<MarkedBlock::None>(sweepMode);
+}
+
+template<MarkedBlock::DestructorType dtorType>
+MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
+{
+    switch (m_state) {
+    case New:
+        ASSERT(sweepMode == SweepToFreeList);
+        return specializedSweep<New, SweepToFreeList, dtorType>();
+    case FreeListed:
+        // Happens when a block transitions to fully allocated.
+        ASSERT(sweepMode == SweepToFreeList);
+        return FreeList();
+    case Allocated:
+        RELEASE_ASSERT_NOT_REACHED();
+        return FreeList();
+    case Marked:
+        return sweepMode == SweepToFreeList
+            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
+            : specializedSweep<Marked, SweepOnly, dtorType>();
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return FreeList();
+}
+
+class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
+public:
+    SetNewlyAllocatedFunctor(MarkedBlock* block)
+        : m_block(block)
+    {
+    }
+
+    void operator()(JSCell* cell)
+    {
+        ASSERT(MarkedBlock::blockFor(cell) == m_block);
+        m_block->setNewlyAllocated(cell);
+    }
+
+private:
+    MarkedBlock* m_block;
+};
+
+void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
+{
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+    FreeCell* head = freeList.head;
+
+    if (m_state == Marked) {
+        // If the block is in the Marked state then we know that:
+        // 1) It was not used for allocation during the previous allocation cycle.
+        // 2) It may have dead objects, and we only know them to be dead by the
+        //    fact that their mark bits are unset.
+        // Hence if the block is Marked we need to leave it Marked.
+
+        ASSERT(!head);
+        return;
+    }
+
+    ASSERT(m_state == FreeListed);
+
+    // Roll back to a coherent state for Heap introspection. Cells newly
+    // allocated from our free list are not currently marked, so we need another
+    // way to tell what's live vs dead.
+
+    ASSERT(!m_newlyAllocated);
+    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());
+
+    SetNewlyAllocatedFunctor functor(this);
+    forEachCell(functor);
+
+    FreeCell* next;
+    for (FreeCell* current = head; current; current = next) {
+        next = current->next;
+        reinterpret_cast<JSCell*>(current)->zap();
+        clearNewlyAllocated(current);
+    }
+
+    m_state = Marked;
 }
 
 } // namespace JSC
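
A note on the free list the new sweep builds: specializedSweep() threads a
singly linked list directly through the memory of the dead cells it visits, so
building the free list allocates nothing, and the list comes out in reverse
address order. As the comment in the patch says, that ordering is harmless
because allocation only ever pops the head. A minimal standalone sketch of the
scheme (FreeCell and FreeList mirror the patch; sweepToFreeList and
allocateFromFreeList are hypothetical stand-ins, not JSC API):

    #include <cassert>
    #include <cstddef>

    struct FreeCell {
        FreeCell* next; // stored in the dead cell's own memory
    };

    struct FreeList {
        FreeCell* head;
        size_t bytes;
    };

    // Push every cell of a (here, entirely dead) block onto the list, as
    // the SweepToFreeList path does; the list ends up in reverse order.
    FreeList sweepToFreeList(char* block, size_t cellSize, size_t cellCount)
    {
        FreeCell* head = nullptr;
        size_t count = 0;
        for (size_t i = 0; i < cellCount; ++i) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(block + i * cellSize);
            freeCell->next = head; // link through the dead cell itself
            head = freeCell;
            ++count;
        }
        return FreeList{head, count * cellSize};
    }

    // Allocation just pops the head, so list order never matters.
    void* allocateFromFreeList(FreeList& freeList)
    {
        if (!freeList.head)
            return nullptr;
        FreeCell* cell = freeList.head;
        freeList.head = cell->next;
        return cell;
    }

    int main()
    {
        alignas(FreeCell) char block[64 * 16];
        FreeList freeList = sweepToFreeList(block, 64, 16);
        assert(freeList.bytes == 64 * 16);
        // Reverse order: the first pop yields the highest-addressed cell.
        assert(allocateFromFreeList(freeList) == block + 64 * 15);
        return 0;
    }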
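Similarly, canonicalizeCellLivenessData() has to rebuild liveness data for a
FreeListed block, because cells handed out from the free list since the last
sweep carry no mark bits. The patch does this with the newlyAllocated bitmap:
set the bit for every cell, then clear (and zap) the cells still sitting on
the free list, leaving bits set only for cells that were actually allocated.
A sketch under the same caveats (std::bitset and the canonicalize helper are
illustrative stand-ins, not JSC code):

    #include <bitset>
    #include <cstddef>

    constexpr size_t cellsPerBlock = 16;

    struct FreeCell {
        FreeCell* next;
    };

    std::bitset<cellsPerBlock> canonicalize(const char* blockBase, size_t cellSize, FreeCell* freeListHead)
    {
        // Step 1: provisionally treat every cell as live, like the
        // SetNewlyAllocatedFunctor pass over forEachCell().
        std::bitset<cellsPerBlock> newlyAllocated;
        newlyAllocated.set();

        // Step 2: cells still on the free list were never handed out, so
        // they are dead; clear their bits (the patch also zaps them here).
        for (FreeCell* current = freeListHead; current; current = current->next)
            newlyAllocated.reset((reinterpret_cast<const char*>(current) - blockBase) / cellSize);

        return newlyAllocated;
    }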