#include "config.h"
#include "MarkedBlock.h"
+#include "IncrementalSweeper.h"
#include "JSCell.h"
-#include "JSObject.h"
-#include "ScopeChain.h"
+#include "JSDestructibleObject.h"
+#include "JSCInlines.h"
namespace JSC {
-MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize, bool cellsNeedDestruction)
+MarkedBlock* MarkedBlock::create(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
{
- PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
- if (!static_cast<bool>(allocation))
- CRASH();
- return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction);
-}
-
-MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
-{
- return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize, cellsNeedDestruction);
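+ // fastAlignedMalloc(alignment, size): the block must be blockSize-aligned so MarkedBlock::blockFor() can recover the owning block from a cell pointer by masking off the low bits.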
+ return new (NotNull, fastAlignedMalloc(blockSize, capacity)) MarkedBlock(allocator, capacity, cellSize, needsDestruction);
}
void MarkedBlock::destroy(MarkedBlock* block)
{
- block->m_allocation.deallocate();
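+ // create() obtained this memory with fastAlignedMalloc, so run the destructor by hand and release the aligned allocation explicitly.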
+ block->~MarkedBlock();
+ fastAlignedFree(block);
}
-MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
- : HeapBlock(allocation)
+MarkedBlock::MarkedBlock(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
+ : DoublyLinkedListNode<MarkedBlock>()
, m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
- , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
- , m_cellsNeedDestruction(cellsNeedDestruction)
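+ // An allocator with cellSize() == 0 hands out a single oversized cell per block, so iteration has to stop right after firstAtom().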
+ , m_endAtom((allocator->cellSize() ? atomsPerBlock - m_atomsPerCell : firstAtom()) + 1)
+ , m_capacity(capacity)
+ , m_needsDestruction(needsDestruction)
+ , m_allocator(allocator)
, m_state(New) // All cells start out unmarked.
- , m_heap(heap)
+ , m_weakSet(allocator->heap()->vm(), *this)
{
- ASSERT(heap);
+ ASSERT(allocator);
HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}
if (cell->isZapped())
return;
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- m_heap->m_destroyedTypeCounts.countVPtr(vptr);
-#endif
- cell->methodTable()->destroy(cell);
-
+ ASSERT(cell->structureID());
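+ // Cells with immortal structures can still safely reach their Structure during destruction; everything else here is a JSDestructibleObject, whose inline ClassInfo lets us destroy it without touching a possibly-swept Structure.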
+ if (cell->inlineTypeFlags() & StructureIsImmortal)
+ cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
+ else
+ jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
cell->zap();
}
-template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded>
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool callDestructors>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
ASSERT(blockState != Allocated && blockState != FreeListed);
- ASSERT(destructorCallNeeded || sweepMode != SweepOnly);
+ ASSERT(!(!callDestructors && sweepMode == SweepOnly));
+ SamplingRegion samplingRegion((callDestructors && blockState != New) ? "Calling destructors" : "sweeping");
+
// This produces a free list that is ordered in reverse through the block.
// This is fine, since the allocation code makes no assumptions about the
// order of the free list.
FreeCell* head = 0;
size_t count = 0;
for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
- if (blockState == Marked && m_marks.get(i))
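+ // Cells in the newlyAllocated set are live even though their mark bits are not set yet, so skip them as well.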
+ if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
continue;
JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
- if (blockState == Zapped && !cell->isZapped())
- continue;
- if (destructorCallNeeded && blockState != New)
+ if (callDestructors && blockState != New)
callDestructor(cell);
if (sweepMode == SweepToFreeList) {
}
}
- m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Zapped);
+ // We only want to discard the newlyAllocated bits if we're creating a FreeList,
+ // otherwise we would lose information on what's currently alive.
+ if (sweepMode == SweepToFreeList && m_newlyAllocated)
+ m_newlyAllocated = nullptr;
+
+ m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
return FreeList(head, count * cellSize());
}
{
HEAP_LOG_BLOCK_STATE_TRANSITION(this);
- if (sweepMode == SweepOnly && !m_cellsNeedDestruction)
+ m_weakSet.sweep();
+
+ if (sweepMode == SweepOnly && !m_needsDestruction)
return FreeList();
- if (m_cellsNeedDestruction)
+ if (m_needsDestruction)
return sweepHelper<true>(sweepMode);
return sweepHelper<false>(sweepMode);
}
-template<bool destructorCallNeeded>
+template<bool callDestructors>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
switch (m_state) {
case New:
ASSERT(sweepMode == SweepToFreeList);
- return specializedSweep<New, SweepToFreeList, destructorCallNeeded>();
+ return specializedSweep<New, SweepToFreeList, callDestructors>();
case FreeListed:
// Happens when a block transitions to fully allocated.
ASSERT(sweepMode == SweepToFreeList);
return FreeList();
+ case Retired:
case Allocated:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return FreeList();
case Marked:
return sweepMode == SweepToFreeList
- ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>()
- : specializedSweep<Marked, SweepOnly, destructorCallNeeded>();
- case Zapped:
- return sweepMode == SweepToFreeList
- ? specializedSweep<Zapped, SweepToFreeList, destructorCallNeeded>()
- : specializedSweep<Zapped, SweepOnly, destructorCallNeeded>();
+ ? specializedSweep<Marked, SweepToFreeList, callDestructors>()
+ : specializedSweep<Marked, SweepOnly, callDestructors>();
}
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return FreeList();
}
-void MarkedBlock::zapFreeList(const FreeList& freeList)
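+// Helper for stopAllocating(): flags every cell in the block as newly allocated; stopAllocating() then clears the bit for cells still on the free list, leaving exactly the live cells flagged.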
+class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
+public:
+ SetNewlyAllocatedFunctor(MarkedBlock* block)
+ : m_block(block)
+ {
+ }
+
+ IterationStatus operator()(JSCell* cell)
+ {
+ ASSERT(MarkedBlock::blockFor(cell) == m_block);
+ m_block->setNewlyAllocated(cell);
+ return IterationStatus::Continue;
+ }
+
+private:
+ MarkedBlock* m_block;
+};
+
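+// Called when the mutator stops allocating out of this block (for example at the start of a collection): fold the remaining free list back into per-cell liveness information.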
+void MarkedBlock::stopAllocating(const FreeList& freeList)
{
HEAP_LOG_BLOCK_STATE_TRANSITION(this);
FreeCell* head = freeList.head;
// Hence if the block is Marked we need to leave it Marked.
ASSERT(!head);
-
- return;
- }
-
- if (m_state == Zapped) {
- // If the block is in the Zapped state then we know that someone already
- // zapped it for us. This could not have happened during a GC, but might
- // be the result of someone having done a GC scan to perform some operation
- // over all live objects (or all live blocks). It also means that somebody
- // had allocated in this block since the last GC, swept all dead objects
- // onto the free list, left the block in the FreeListed state, then the heap
- // scan happened, and canonicalized the block, leading to all dead objects
- // being zapped. Therefore, it is safe for us to simply do nothing, since
- // dead objects will have 0 in their vtables and live objects will have
- // non-zero vtables, which is consistent with the block being zapped.
-
- ASSERT(!head);
-
return;
}
-
+
ASSERT(m_state == FreeListed);
// Roll back to a coherent state for Heap introspection. Cells newly
// allocated from our free list are not currently marked, so we need another
- // way to tell what's live vs dead. We use zapping for that.
+ // way to tell what's live vs dead.
+ ASSERT(!m_newlyAllocated);
+ m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();
+
+ SetNewlyAllocatedFunctor functor(this);
+ forEachCell(functor);
+
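+ // Cells still sitting on the free list are dead: zap them and drop their newlyAllocated bits so that only live cells remain flagged.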
FreeCell* next;
for (FreeCell* current = head; current; current = next) {
next = current->next;
reinterpret_cast<JSCell*>(current)->zap();
+ clearNewlyAllocated(current);
}
- m_state = Zapped;
+ m_state = Marked;
+}
+
+void MarkedBlock::clearMarks()
+{
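+ // With generational GC enabled, an Eden collection must preserve the mark bits of older objects, so dispatch on the collection type in progress.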
+#if ENABLE(GGC)
+ if (heap()->operationInProgress() == JSC::EdenCollection)
+ this->clearMarksWithCollectionType<EdenCollection>();
+ else
+ this->clearMarksWithCollectionType<FullCollection>();
+#else
+ this->clearMarksWithCollectionType<FullCollection>();
+#endif
+}
+
+template <HeapOperation collectionType>
+void MarkedBlock::clearMarksWithCollectionType()
+{
+ ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ ASSERT(m_state != New && m_state != FreeListed);
+ if (collectionType == FullCollection) {
+ m_marks.clearAll();
+ // This will become true at the end of the mark phase. We set it now to
+ // avoid an extra pass to do so later.
+ m_state = Marked;
+ return;
+ }
+
+ ASSERT(collectionType == EdenCollection);
+ // If a block was retired then there's no way an EdenCollection can un-retire it.
+ if (m_state != Retired)
+ m_state = Marked;
+}
+
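+// Called during heap teardown: finalize weak references, then treat every cell as unmarked so the sweep below runs any outstanding destructors.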
+void MarkedBlock::lastChanceToFinalize()
+{
+ m_weakSet.lastChanceToFinalize();
+
+ clearNewlyAllocated();
+ clearMarksWithCollectionType<FullCollection>();
+ sweep();
+}
+
+MarkedBlock::FreeList MarkedBlock::resumeAllocating()
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ ASSERT(m_state == Marked);
+
+ if (!m_newlyAllocated) {
+ // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
+ // when we last stopped allocation, so return an empty free list and stay in the Marked state.
+ return FreeList();
+ }
+
+ // Re-create our free list from before stopping allocation.
+ return sweep(SweepToFreeList);
+}
+
+void MarkedBlock::didRetireBlock(const FreeList& freeList)
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ FreeCell* head = freeList.head;
+
+ // Currently we don't notify the Heap that we're giving up on this block.
+ // The Heap might be able to make a better decision about how many bytes should
+ // be allocated before the next collection if it knew about this retired block.
+ // On the other hand we'll waste at most 10% of our Heap space between FullCollections
+ // and only under heavy fragmentation.
+
+ // We need to zap the free list when retiring a block so that we don't try to destroy
+ // previously destroyed objects when we re-sweep the block in the future.
+ FreeCell* next;
+ for (FreeCell* current = head; current; current = next) {
+ next = current->next;
+ reinterpret_cast<JSCell*>(current)->zap();
+ }
+
+ ASSERT(m_state == FreeListed);
+ m_state = Retired;
}
} // namespace JSC