X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/93a3786624b2768d89bfa27e46598dc64e2fb70a..ed1e77d3adeb83d26fd1dfb16dd84cabdcefd250:/heap/MarkedSpace.cpp?ds=inline

diff --git a/heap/MarkedSpace.cpp b/heap/MarkedSpace.cpp
index 2bef608..4f30890 100644
--- a/heap/MarkedSpace.cpp
+++ b/heap/MarkedSpace.cpp
@@ -25,7 +25,7 @@
 #include "JSGlobalObject.h"
 #include "JSLock.h"
 #include "JSObject.h"
-
+#include "JSCInlines.h"
 
 namespace JSC {
 
@@ -79,38 +79,38 @@ struct ReapWeakSet : MarkedBlock::VoidFunctor {
 
 MarkedSpace::MarkedSpace(Heap* heap)
     : m_heap(heap)
+    , m_capacity(0)
+    , m_isIterating(false)
 {
     for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
-        allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
-        normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
-        immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
+        allocatorFor(cellSize).init(heap, this, cellSize, false);
+        destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
     }
 
     for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
-        allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
-        normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
-        immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
+        allocatorFor(cellSize).init(heap, this, cellSize, false);
+        destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
     }
 
-    m_normalSpace.largeAllocator.init(heap, this, 0, MarkedBlock::None);
-    m_normalDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::Normal);
-    m_immortalStructureDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::ImmortalStructure);
+    m_normalSpace.largeAllocator.init(heap, this, 0, false);
+    m_destructorSpace.largeAllocator.init(heap, this, 0, true);
 }
 
 MarkedSpace::~MarkedSpace()
 {
     Free free(Free::FreeAll, this);
     forEachBlock(free);
+    ASSERT(!m_blocks.set().size());
 }
 
-struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
-    void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
+struct LastChanceToFinalize {
+    void operator()(MarkedAllocator& allocator) { allocator.lastChanceToFinalize(); }
 };
 
 void MarkedSpace::lastChanceToFinalize()
 {
-    canonicalizeCellLivenessData();
-    forEachBlock<LastChanceToFinalize>();
+    stopAllocating();
+    forEachAllocator<LastChanceToFinalize>();
 }
 
 void MarkedSpace::sweep()
@@ -119,74 +119,113 @@ void MarkedSpace::sweep()
     forEachBlock<Sweep>();
 }
 
+void MarkedSpace::zombifySweep()
+{
+    if (Options::logGC())
+        dataLog("Zombifying sweep...");
+    m_heap->sweeper()->willFinishSweeping();
+    forEachBlock<ZombifySweep>();
+}
+
 void MarkedSpace::resetAllocators()
 {
     for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
         allocatorFor(cellSize).reset();
-        normalDestructorAllocatorFor(cellSize).reset();
-        immortalStructureDestructorAllocatorFor(cellSize).reset();
+        destructorAllocatorFor(cellSize).reset();
     }
 
     for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
         allocatorFor(cellSize).reset();
-        normalDestructorAllocatorFor(cellSize).reset();
-        immortalStructureDestructorAllocatorFor(cellSize).reset();
+        destructorAllocatorFor(cellSize).reset();
     }
 
     m_normalSpace.largeAllocator.reset();
-    m_normalDestructorSpace.largeAllocator.reset();
-    m_immortalStructureDestructorSpace.largeAllocator.reset();
+    m_destructorSpace.largeAllocator.reset();
+
+#if ENABLE(GGC)
+    m_blocksWithNewObjects.clear();
+#endif
 }
 
 void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
 {
     VisitWeakSet visitWeakSet(heapRootVisitor);
-    forEachBlock(visitWeakSet);
+    if (m_heap->operationInProgress() == EdenCollection) {
+        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
+            visitWeakSet(m_blocksWithNewObjects[i]);
+    } else
+        forEachBlock(visitWeakSet);
 }
 
 void MarkedSpace::reapWeakSets()
 {
-    forEachBlock<ReapWeakSet>();
+    if (m_heap->operationInProgress() == EdenCollection) {
+        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
+            m_blocksWithNewObjects[i]->reapWeakSet();
+    } else
+        forEachBlock<ReapWeakSet>();
 }
 
-void MarkedSpace::canonicalizeCellLivenessData()
+template <typename Functor>
+void MarkedSpace::forEachAllocator()
+{
+    Functor functor;
+    forEachAllocator(functor);
+}
+
+template <typename Functor>
+void MarkedSpace::forEachAllocator(Functor& functor)
 {
     for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
-        allocatorFor(cellSize).canonicalizeCellLivenessData();
-        normalDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
-        immortalStructureDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
+        functor(allocatorFor(cellSize));
+        functor(destructorAllocatorFor(cellSize));
     }
 
     for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
-        allocatorFor(cellSize).canonicalizeCellLivenessData();
-        normalDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
-        immortalStructureDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
+        functor(allocatorFor(cellSize));
+        functor(destructorAllocatorFor(cellSize));
    }
 
-    m_normalSpace.largeAllocator.canonicalizeCellLivenessData();
-    m_normalDestructorSpace.largeAllocator.canonicalizeCellLivenessData();
-    m_immortalStructureDestructorSpace.largeAllocator.canonicalizeCellLivenessData();
+    functor(m_normalSpace.largeAllocator);
+    functor(m_destructorSpace.largeAllocator);
+}
+
+struct StopAllocatingFunctor {
+    void operator()(MarkedAllocator& allocator) { allocator.stopAllocating(); }
+};
+
+void MarkedSpace::stopAllocating()
+{
+    ASSERT(!isIterating());
+    forEachAllocator<StopAllocatingFunctor>();
+}
+
+struct ResumeAllocatingFunctor {
+    void operator()(MarkedAllocator& allocator) { allocator.resumeAllocating(); }
+};
+
+void MarkedSpace::resumeAllocating()
+{
+    ASSERT(isIterating());
+    forEachAllocator<ResumeAllocatingFunctor>();
 }
 
 bool MarkedSpace::isPagedOut(double deadline)
 {
     for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
         if (allocatorFor(cellSize).isPagedOut(deadline)
-            || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
-            || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
+            || destructorAllocatorFor(cellSize).isPagedOut(deadline))
             return true;
     }
 
     for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
         if (allocatorFor(cellSize).isPagedOut(deadline)
-            || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
-            || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
+            || destructorAllocatorFor(cellSize).isPagedOut(deadline))
             return true;
     }
 
     if (m_normalSpace.largeAllocator.isPagedOut(deadline)
-        || m_normalDestructorSpace.largeAllocator.isPagedOut(deadline)
-        || m_immortalStructureDestructorSpace.largeAllocator.isPagedOut(deadline))
+        || m_destructorSpace.largeAllocator.isPagedOut(deadline))
         return true;
 
     return false;
@@ -195,12 +234,9 @@ bool MarkedSpace::isPagedOut(double deadline)
 void MarkedSpace::freeBlock(MarkedBlock* block)
 {
     block->allocator()->removeBlock(block);
+    m_capacity -= block->capacity();
     m_blocks.remove(block);
-    if (block->capacity() == MarkedBlock::blockSize) {
-        m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block));
-        return;
-    }
-    m_heap->blockAllocator().deallocateCustomSize(MarkedBlock::destroy(block));
+    MarkedBlock::destroy(block);
 }
 
 void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
@@ -223,4 +259,89 @@ void MarkedSpace::shrink()
     forEachBlock(freeOrShrink);
 }
 
+static void clearNewlyAllocatedInBlock(MarkedBlock* block)
+{
+    if (!block)
+        return;
+    block->clearNewlyAllocated();
+}
+
+struct ClearNewlyAllocated : MarkedBlock::VoidFunctor {
+    void operator()(MarkedBlock* block) { block->clearNewlyAllocated(); }
+};
+
+#ifndef NDEBUG
+struct VerifyNewlyAllocated : MarkedBlock::VoidFunctor {
+    void operator()(MarkedBlock* block) { ASSERT(!block->clearNewlyAllocated()); }
+};
+#endif
+
+void MarkedSpace::clearNewlyAllocated()
+{
+    for (size_t i = 0; i < preciseCount; ++i) {
+        clearNewlyAllocatedInBlock(m_normalSpace.preciseAllocators[i].takeLastActiveBlock());
+        clearNewlyAllocatedInBlock(m_destructorSpace.preciseAllocators[i].takeLastActiveBlock());
+    }
+
+    for (size_t i = 0; i < impreciseCount; ++i) {
+        clearNewlyAllocatedInBlock(m_normalSpace.impreciseAllocators[i].takeLastActiveBlock());
+        clearNewlyAllocatedInBlock(m_destructorSpace.impreciseAllocators[i].takeLastActiveBlock());
+    }
+
+    // We have to iterate all of the blocks in the large allocators because they are
+    // canonicalized as they are used up (see MarkedAllocator::tryAllocateHelper)
+    // which creates the m_newlyAllocated bitmap.
+    ClearNewlyAllocated functor;
+    m_normalSpace.largeAllocator.forEachBlock(functor);
+    m_destructorSpace.largeAllocator.forEachBlock(functor);
+
+#ifndef NDEBUG
+    VerifyNewlyAllocated verifyFunctor;
+    forEachBlock(verifyFunctor);
+#endif
+}
+
+#ifndef NDEBUG
+struct VerifyMarkedOrRetired : MarkedBlock::VoidFunctor {
+    void operator()(MarkedBlock* block)
+    {
+        switch (block->m_state) {
+        case MarkedBlock::Marked:
+        case MarkedBlock::Retired:
+            return;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+};
+#endif
+
+void MarkedSpace::clearMarks()
+{
+    if (m_heap->operationInProgress() == EdenCollection) {
+        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
+            m_blocksWithNewObjects[i]->clearMarks();
+    } else
+        forEachBlock<ClearMarks>();
+
+#ifndef NDEBUG
+    VerifyMarkedOrRetired verifyFunctor;
+    forEachBlock(verifyFunctor);
+#endif
+}
+
+void MarkedSpace::willStartIterating()
+{
+    ASSERT(!isIterating());
+    stopAllocating();
+    m_isIterating = true;
+}
+
+void MarkedSpace::didFinishIterating()
+{
+    ASSERT(isIterating());
+    resumeAllocating();
+    m_isIterating = false;
+}
+
 } // namespace JSC
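The hunks above replace the hand-written canonicalizeCellLivenessData() walks with a generic forEachAllocator<Functor>() that applies a small functor struct (LastChanceToFinalize, StopAllocatingFunctor, ResumeAllocatingFunctor) to every MarkedAllocator. The standalone sketch below, not part of the patch, only mirrors the shape of that pattern under assumed names: ToyAllocator, ToySpace, and StopAllocating are hypothetical types invented for illustration and are not JavaScriptCore code.

// Illustration only: hypothetical, simplified analogue of the functor-over-allocators pattern.
#include <array>
#include <cstdio>

struct ToyAllocator {
    bool allocating = true;
    void stopAllocating() { allocating = false; }
};

struct ToySpace {
    std::array<ToyAllocator, 4> allocators; // stands in for the per-size-class and large allocators

    // Same shape as the patch: default-construct the functor, then visit every allocator.
    template<typename Functor> void forEachAllocator()
    {
        Functor functor;
        forEachAllocator(functor);
    }

    template<typename Functor> void forEachAllocator(Functor& functor)
    {
        for (ToyAllocator& allocator : allocators)
            functor(allocator);
    }
};

// Analogue of the patch's StopAllocatingFunctor.
struct StopAllocating {
    void operator()(ToyAllocator& allocator) { allocator.stopAllocating(); }
};

int main()
{
    ToySpace space;
    space.forEachAllocator<StopAllocating>();   // every allocator stops allocating
    std::printf("allocator 0 allocating: %d\n", space.allocators[0].allocating);
    return 0;
}

The old code repeated the precise, imprecise, and large-allocator loops in every operation; centralizing the traversal in one template means callers such as stopAllocating() and lastChanceToFinalize() cannot drift out of sync with the set of allocators.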