diff --git a/heap/MarkedBlock.cpp b/heap/MarkedBlock.cpp
index f1f630ceaa30ff123eb6d6c4d2ac2db154c961da..f4d39fc7dd43be7956b9ff6ee32f35cead38190b 100644
--- a/heap/MarkedBlock.cpp
+++ b/heap/MarkedBlock.cpp
 #include "config.h"
 #include "MarkedBlock.h"
 
+#include "DelayedReleaseScope.h"
+#include "IncrementalSweeper.h"
 #include "JSCell.h"
-#include "JSObject.h"
-#include "JSZombie.h"
-#include "ScopeChain.h"
+#include "JSDestructibleObject.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
-MarkedBlock* MarkedBlock::create(JSGlobalData* globalData, size_t cellSize)
+MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
 {
-    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
-    if (!static_cast<bool>(allocation))
-        CRASH();
-    return new (allocation.base()) MarkedBlock(allocation, globalData, cellSize);
+    ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
+    Region* region = block->region();
+    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
 }
 
-void MarkedBlock::destroy(MarkedBlock* block)
+MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
+    : HeapBlock<MarkedBlock>(region)
+    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
+    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
+    , m_destructorType(destructorType)
+    , m_allocator(allocator)
+    , m_state(New) // All cells start out unmarked.
+    , m_weakSet(allocator->heap()->vm())
 {
-    for (size_t i = block->firstAtom(); i < block->m_endAtom; i += block->m_atomsPerCell)
-        reinterpret_cast<JSCell*>(&block->atoms()[i])->~JSCell();
-    block->m_allocation.deallocate();
+    ASSERT(allocator);
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
 }
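
The new constructor's atom arithmetic deserves a note: a cell is rounded up to a whole number of atoms, and m_endAtom is the first atom index at which a cell of this size would no longer fit in the block. A minimal standalone sketch of that calculation, using illustrative constants rather than the real MarkedBlock geometry:

#include <cassert>
#include <cstddef>

// Illustrative constants; the real MarkedBlock has its own block/atom sizes.
static const size_t atomSize = 16;        // bytes per atom
static const size_t atomsPerBlock = 4096; // atoms in a full-sized block

struct BlockGeometry {
    size_t atomsPerCell;
    size_t endAtom; // first atom index at which a cell can no longer start
};

static BlockGeometry computeGeometry(size_t cellSize)
{
    BlockGeometry g;
    // Round the cell size up to a whole number of atoms (ceiling division).
    g.atomsPerCell = (cellSize + atomSize - 1) / atomSize;
    // A cell starting at or beyond endAtom would run off the end of the block.
    g.endAtom = atomsPerBlock - g.atomsPerCell + 1;
    return g;
}

int main()
{
    BlockGeometry g = computeGeometry(40); // 40-byte cells
    assert(g.atomsPerCell == 3);           // 40 bytes round up to 3 atoms of 16 bytes
    assert(g.endAtom == atomsPerBlock - 2);
    return 0;
}
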
 
-MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, JSGlobalData* globalData, size_t cellSize)
-    : m_nextAtom(firstAtom())
-    , m_allocation(allocation)
-    , m_heap(&globalData->heap)
-    , m_prev(0)
-    , m_next(0)
+template<MarkedBlock::DestructorType dtorType>
+inline void MarkedBlock::callDestructor(JSCell* cell)
 {
-    m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
-    m_endAtom = atomsPerBlock - m_atomsPerCell + 1;
+    // A previous eager sweep may already have run cell's destructor.
+    if (cell->isZapped())
+        return;
 
-    Structure* dummyMarkableCellStructure = globalData->dummyMarkableCellStructure.get();
-    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
-        new (&atoms()[i]) JSCell(*globalData, dummyMarkableCellStructure, JSCell::CreatingEarlyCell);
+    if (dtorType == MarkedBlock::Normal)
+        jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
+    else
+        cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
+    cell->zap();
 }
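
The isZapped() guard added here is what makes eager sweeping safe to repeat: once a cell's destructor has run, the cell is zapped so a later sweep of the same block skips it. A rough sketch of the pattern, where a hypothetical Cell type stands in for JSCell and the zap is modeled with a simple header field:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for JSCell; only the zap bookkeeping is modeled here.
struct Cell {
    uintptr_t header; // non-zero while the cell is a live, constructed object
    bool destroyed;

    bool isZapped() const { return !header; }
    void zap() { header = 0; }
};

static void callDestructor(Cell& cell)
{
    // A previous eager sweep may already have run the destructor.
    if (cell.isZapped())
        return;
    cell.destroyed = true; // stand-in for running the real destructor
    cell.zap();            // record that destruction already happened
}

int main()
{
    Cell cell = { 0x1, false };
    callDestructor(cell); // runs the "destructor" and zaps the cell
    callDestructor(cell); // second sweep: the guard sees the zap and does nothing
    assert(cell.destroyed && cell.isZapped());
    return 0;
}
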
 
-void MarkedBlock::sweep()
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
+MarkedBlock::FreeList MarkedBlock::specializedSweep()
 {
-    Structure* dummyMarkableCellStructure = m_heap->globalData()->dummyMarkableCellStructure.get();
+    ASSERT(blockState != Allocated && blockState != FreeListed);
+    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));
 
+    // This produces a free list that is ordered in reverse through the block.
+    // This is fine, since the allocation code makes no assumptions about the
+    // order of the free list.
+    FreeCell* head = 0;
+    size_t count = 0;
     for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
-        if (m_marks.get(i))
+        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
             continue;
 
-        JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
-#if ENABLE(JSC_ZOMBIES)
-        if (cell->structure() && cell->structure() != dummyMarkableCellStructure && !cell->isZombie()) {
-            const ClassInfo* info = cell->classInfo();
-            cell->~JSCell();
-            new (cell) JSZombie(*m_heap->globalData(), info, m_heap->globalData()->zombieStructure.get());
-            m_marks.set(i);
+        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+
+        if (dtorType != MarkedBlock::None && blockState != New)
+            callDestructor<dtorType>(cell);
+
+        if (sweepMode == SweepToFreeList) {
+            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
+            freeCell->next = head;
+            head = freeCell;
+            ++count;
         }
+    }
+
+    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
+    // otherwise we would lose information on what's currently alive.
+    if (sweepMode == SweepToFreeList && m_newlyAllocated)
+        m_newlyAllocated.clear();
+
+    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
+    return FreeList(head, count * cellSize());
+}
+
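
As the comment above notes, specializedSweep() threads a singly linked free list through the dead cells themselves, pushing at the head so the list ends up in reverse block order; the allocator only ever pops from the head, so the order does not matter. A self-contained sketch of that shape (the types and sizes here are illustrative, not the real FreeCell/FreeList layout):

#include <cassert>
#include <cstddef>

// A free list threaded through the dead cells themselves.
struct FreeCell {
    FreeCell* next;
};

struct FreeList {
    FreeCell* head;
    size_t bytes;
};

// Build a free list over `count` dead cells of `cellSize` bytes inside `block`.
static FreeList buildFreeList(char* block, size_t count, size_t cellSize)
{
    FreeCell* head = nullptr;
    for (size_t i = 0; i < count; ++i) {
        FreeCell* freeCell = reinterpret_cast<FreeCell*>(block + i * cellSize);
        freeCell->next = head; // push-front, hence the reverse order
        head = freeCell;
    }
    return { head, count * cellSize };
}

// An allocator consumes the list by popping from the head.
static void* allocateFromFreeList(FreeList& freeList)
{
    FreeCell* cell = freeList.head;
    if (!cell)
        return nullptr;
    freeList.head = cell->next;
    return cell;
}

int main()
{
    alignas(FreeCell) char block[8 * 64];
    FreeList freeList = buildFreeList(block, 8, 64);
    assert(freeList.bytes == 8 * 64);
    // The first pop returns the last cell pushed, i.e. the highest-addressed one.
    assert(allocateFromFreeList(freeList) == block + 7 * 64);
    return 0;
}
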
+MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
+{
+    ASSERT(DelayedReleaseScope::isInEffectFor(heap()->m_objectSpace));
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+    m_weakSet.sweep();
+
+    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
+        return FreeList();
+
+    if (m_destructorType == MarkedBlock::ImmortalStructure)
+        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
+    if (m_destructorType == MarkedBlock::Normal)
+        return sweepHelper<MarkedBlock::Normal>(sweepMode);
+    return sweepHelper<MarkedBlock::None>(sweepMode);
+}
+
+template<MarkedBlock::DestructorType dtorType>
+MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
+{
+    switch (m_state) {
+    case New:
+        ASSERT(sweepMode == SweepToFreeList);
+        return specializedSweep<New, SweepToFreeList, dtorType>();
+    case FreeListed:
+        // Happens when a block transitions to fully allocated.
+        ASSERT(sweepMode == SweepToFreeList);
+        return FreeList();
+    case Retired:
+    case Allocated:
+        RELEASE_ASSERT_NOT_REACHED();
+        return FreeList();
+    case Marked:
+        return sweepMode == SweepToFreeList
+            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
+            : specializedSweep<Marked, SweepOnly, dtorType>();
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return FreeList();
+}
+
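
sweep() and sweepHelper() exist to turn two runtime values, the block's destructor type and the requested sweep mode, into template parameters, so each specializedSweep() instantiation compiles with those branches folded away. The dispatch idiom reduced to a standalone sketch (enum and function names here are illustrative, not the real JSC types):

#include <cstdio>

enum class DestructorType { None, Normal, ImmortalStructure };
enum class SweepMode { SweepOnly, SweepToFreeList };

// The fully specialized worker: both parameters are compile-time constants,
// so the branches on them fold away in each instantiation.
template<DestructorType dtorType, SweepMode sweepMode>
static int specializedSweep()
{
    int work = 0;
    if (dtorType != DestructorType::None)
        work += 1; // would call destructors here
    if (sweepMode == SweepMode::SweepToFreeList)
        work += 2; // would build a free list here
    return work;
}

// Runtime values are peeled off one at a time into template arguments.
template<DestructorType dtorType>
static int sweepHelper(SweepMode sweepMode)
{
    return sweepMode == SweepMode::SweepToFreeList
        ? specializedSweep<dtorType, SweepMode::SweepToFreeList>()
        : specializedSweep<dtorType, SweepMode::SweepOnly>();
}

static int sweep(DestructorType dtorType, SweepMode sweepMode)
{
    if (dtorType == DestructorType::ImmortalStructure)
        return sweepHelper<DestructorType::ImmortalStructure>(sweepMode);
    if (dtorType == DestructorType::Normal)
        return sweepHelper<DestructorType::Normal>(sweepMode);
    return sweepHelper<DestructorType::None>(sweepMode);
}

int main()
{
    std::printf("%d\n", sweep(DestructorType::Normal, SweepMode::SweepToFreeList)); // prints 3
    return 0;
}
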
+class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
+public:
+    SetNewlyAllocatedFunctor(MarkedBlock* block)
+        : m_block(block)
+    {
+    }
+
+    void operator()(JSCell* cell)
+    {
+        ASSERT(MarkedBlock::blockFor(cell) == m_block);
+        m_block->setNewlyAllocated(cell);
+    }
+
+private:
+    MarkedBlock* m_block;
+};
+
+void MarkedBlock::stopAllocating(const FreeList& freeList)
+{
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+    FreeCell* head = freeList.head;
+
+    if (m_state == Marked) {
+        // If the block is in the Marked state then we know that:
+        // 1) It was not used for allocation during the previous allocation cycle.
+        // 2) It may have dead objects, and we only know them to be dead by the
+        //    fact that their mark bits are unset.
+        // Hence if the block is Marked we need to leave it Marked.
+        
+        ASSERT(!head);
+        return;
+    }
+   
+    ASSERT(m_state == FreeListed);
+    
+    // Roll back to a coherent state for Heap introspection. Cells newly
+    // allocated from our free list are not currently marked, so we need another
+    // way to tell what's live vs dead. 
+    
+    ASSERT(!m_newlyAllocated);
+    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());
+
+    SetNewlyAllocatedFunctor functor(this);
+    forEachCell(functor);
+
+    FreeCell* next;
+    for (FreeCell* current = head; current; current = next) {
+        next = current->next;
+        reinterpret_cast<JSCell*>(current)->zap();
+        clearNewlyAllocated(current);
+    }
+    
+    m_state = Marked;
+}
+
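
stopAllocating() has to reconcile two facts: cells recently handed out from the free list are live but carry no mark bits, while cells still sitting on the free list are dead. The newlyAllocated bitmap vouches for the first group, and the loop above strips the second group back out of it. A toy model of that bookkeeping, assuming a hypothetical 8-cell block and treating a cell as live when it is either marked or newly allocated:

#include <bitset>
#include <cassert>
#include <cstddef>

static const size_t cellCount = 8;

struct ToyBlock {
    std::bitset<cellCount> marks;          // set by the last mark phase
    std::bitset<cellCount> newlyAllocated; // set while rolling back a free list

    bool isLive(size_t i) const { return marks.test(i) || newlyAllocated.test(i); }
};

int main()
{
    ToyBlock block;
    // Cells 0 and 1 survived the last collection; cells 2..7 went on the free
    // list, and cells 2 and 3 have since been handed out to the mutator.
    block.marks.set(0);
    block.marks.set(1);

    // stopAllocating(): first assume every cell is newly allocated...
    block.newlyAllocated.set();
    // ...then walk the remaining free list (cells 4..7 here) and clear them.
    for (size_t i = 4; i < cellCount; ++i)
        block.newlyAllocated.reset(i);

    assert(block.isLive(0) && block.isLive(2)); // marked and newly allocated both count
    assert(!block.isLive(5));                   // still on the free list: dead
    return 0;
}
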
+void MarkedBlock::clearMarks()
+{
+#if ENABLE(GGC)
+    if (heap()->operationInProgress() == JSC::EdenCollection)
+        this->clearMarksWithCollectionType<EdenCollection>();
+    else
+        this->clearMarksWithCollectionType<FullCollection>();
 #else
-        cell->~JSCell();
-        new (cell) JSCell(*m_heap->globalData(), dummyMarkableCellStructure);
+    this->clearMarksWithCollectionType<FullCollection>();
 #endif
+}
+
+void MarkedBlock::clearRememberedSet()
+{
+    m_rememberedSet.clearAll();
+}
+
+template <HeapOperation collectionType>
+void MarkedBlock::clearMarksWithCollectionType()
+{
+    ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+    ASSERT(m_state != New && m_state != FreeListed);
+    if (collectionType == FullCollection) {
+        m_marks.clearAll();
+#if ENABLE(GGC)
+        m_rememberedSet.clearAll();
+#endif
+        // This will become true at the end of the mark phase. We set it now to
+        // avoid an extra pass to do so later.
+        m_state = Marked;
+        return;
     }
+
+    ASSERT(collectionType == EdenCollection);
+    // If a block was retired then there's no way an EdenCollection can un-retire it.
+    if (m_state != Retired)
+        m_state = Marked;
+}
+
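
The split between full and eden collections above can be summarised as: a full collection forgets everything (every object must be re-proven live and the remembered set is rebuilt), while an eden collection keeps old mark bits and never un-retires a block. A hedged toy model of that behaviour (the state and bit names are illustrative):

#include <bitset>
#include <cassert>

enum class BlockState { New, FreeListed, Allocated, Marked, Retired };
enum class Collection { Eden, Full };

struct ToyBlock {
    std::bitset<8> marks;
    std::bitset<8> rememberedSet;
    BlockState state = BlockState::Marked;

    void clearMarks(Collection type)
    {
        if (type == Collection::Full) {
            marks.reset();
            rememberedSet.reset();
            state = BlockState::Marked; // will be true by the end of the mark phase
            return;
        }
        // Eden collection: old marks survive, and a retired block stays retired.
        if (state != BlockState::Retired)
            state = BlockState::Marked;
    }
};

int main()
{
    ToyBlock block;
    block.marks.set(3);

    block.clearMarks(Collection::Eden);
    assert(block.marks.test(3)); // old object is still considered marked

    block.clearMarks(Collection::Full);
    assert(block.marks.none()); // everything must be re-proven live
    return 0;
}
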
+void MarkedBlock::lastChanceToFinalize()
+{
+    m_weakSet.lastChanceToFinalize();
+
+    clearNewlyAllocated();
+    clearMarksWithCollectionType<FullCollection>();
+    sweep();
+}
+
+MarkedBlock::FreeList MarkedBlock::resumeAllocating()
+{
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+    ASSERT(m_state == Marked);
+
+    if (!m_newlyAllocated) {
+        // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
+        // when we last stopped allocation, so return an empty free list and stay in the Marked state.
+        return FreeList();
+    }
+
+    // Re-create our free list from before stopping allocation. 
+    return sweep(SweepToFreeList);
+}
+
+void MarkedBlock::didRetireBlock(const FreeList& freeList)
+{
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+    FreeCell* head = freeList.head;
+
+    // Currently we don't notify the Heap that we're giving up on this block. 
+    // The Heap might be able to make a better decision about how many bytes should 
+    // be allocated before the next collection if it knew about this retired block.
+    // On the other hand we'll waste at most 10% of our Heap space between FullCollections 
+    // and only under heavy fragmentation.
+
+    // We need to zap the free list when retiring a block so that we don't try to destroy 
+    // previously destroyed objects when we re-sweep the block in the future.
+    FreeCell* next;
+    for (FreeCell* current = head; current; current = next) {
+        next = current->next;
+        reinterpret_cast<JSCell*>(current)->zap();
+    }
+
+    ASSERT(m_state == FreeListed);
+    m_state = Retired;
 }
 
 } // namespace JSC
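
Taken as a whole, this file walks a block through a small state machine, which is what HEAP_LOG_BLOCK_STATE_TRANSITION traces. The sketch below summarises the transitions as they appear to be exercised here; it is an inference from this diff, not an authoritative list (the Allocated state, for instance, is entered by the allocator elsewhere once a free list is exhausted):

#include <cassert>

enum class State { New, FreeListed, Allocated, Marked, Retired };

static bool isValidTransition(State from, State to)
{
    switch (to) {
    case State::FreeListed: // sweep(SweepToFreeList) / resumeAllocating()
        return from == State::New || from == State::Marked;
    case State::Marked:     // stopAllocating(), clearMarks(), specializedSweep();
                            // Retired -> Marked happens when a full collection clears marks
        return from == State::FreeListed || from == State::Allocated
            || from == State::Marked || from == State::Retired;
    case State::Allocated:  // the allocator exhausts the free list
        return from == State::FreeListed;
    case State::Retired:    // didRetireBlock()
        return from == State::FreeListed;
    case State::New:        // only reached via MarkedBlock::create()
        return false;
    }
    return false;
}

int main()
{
    // A typical lifetime: created, swept to a free list, filled up, then marked.
    assert(isValidTransition(State::New, State::FreeListed));
    assert(isValidTransition(State::FreeListed, State::Allocated));
    assert(isValidTransition(State::Allocated, State::Marked));
    // A retired block is never re-swept to a free list directly.
    assert(!isValidTransition(State::Retired, State::FreeListed));
    return 0;
}
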