#include "config.h"
#include "MarkedBlock.h"
+#include "IncrementalSweeper.h"
#include "JSCell.h"
-#include "JSObject.h"
-#include "JSZombie.h"
-#include "ScopeChain.h"
+#include "JSDestructibleObject.h"
+#include "JSCInlines.h"
namespace JSC {
-MarkedBlock* MarkedBlock::create(JSGlobalData* globalData, size_t cellSize)
+MarkedBlock* MarkedBlock::create(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
{
- PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
- if (!static_cast<bool>(allocation))
- CRASH();
- return new (allocation.base()) MarkedBlock(allocation, globalData, cellSize);
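+ // MarkedBlocks must be blockSize-aligned so that MarkedBlock::blockFor() can
+ // recover a cell's block by masking the cell pointer; hence fastAlignedMalloc
+ // here, paired with fastAlignedFree in destroy() below.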
+ return new (NotNull, fastAlignedMalloc(blockSize, capacity)) MarkedBlock(allocator, capacity, cellSize, needsDestruction);
}
void MarkedBlock::destroy(MarkedBlock* block)
{
- for (size_t i = block->firstAtom(); i < block->m_endAtom; i += block->m_atomsPerCell)
- reinterpret_cast<JSCell*>(&block->atoms()[i])->~JSCell();
- block->m_allocation.deallocate();
+ block->~MarkedBlock();
+ fastAlignedFree(block);
}
-MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, JSGlobalData* globalData, size_t cellSize)
- : m_nextAtom(firstAtom())
- , m_allocation(allocation)
- , m_heap(&globalData->heap)
- , m_prev(0)
- , m_next(0)
+MarkedBlock::MarkedBlock(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
+ : DoublyLinkedListNode<MarkedBlock>()
+ , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
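+ // A cellSize of zero on the allocator denotes an oversize block holding a
+ // single cell, so iteration must stop just past the first atom.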
+ , m_endAtom((allocator->cellSize() ? atomsPerBlock - m_atomsPerCell : firstAtom()) + 1)
+ , m_capacity(capacity)
+ , m_needsDestruction(needsDestruction)
+ , m_allocator(allocator)
+ , m_state(New) // All cells start out unmarked.
+ , m_weakSet(allocator->heap()->vm(), *this)
{
- m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
- m_endAtom = atomsPerBlock - m_atomsPerCell + 1;
+ ASSERT(allocator);
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+}
+
+inline void MarkedBlock::callDestructor(JSCell* cell)
+{
+ // A previous eager sweep may already have run the cell's destructor.
+ if (cell->isZapped())
+ return;
- Structure* dummyMarkableCellStructure = globalData->dummyMarkableCellStructure.get();
- for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
- new (&atoms()[i]) JSCell(*globalData, dummyMarkableCellStructure, JSCell::CreatingEarlyCell);
+ ASSERT(cell->structureID());
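+ // Cells marked StructureIsImmortal may safely consult their Structure while
+ // being destroyed; everything else must be a JSDestructibleObject, which
+ // caches its ClassInfo in the cell so destruction never touches the Structure.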
+ if (cell->inlineTypeFlags() & StructureIsImmortal)
+ cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
+ else
+ jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
+ cell->zap();
}
-void MarkedBlock::sweep()
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool callDestructors>
+MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
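+ // blockState, sweepMode and callDestructors are template parameters so that
+ // the compiler emits a specialized loop for each combination, keeping these
+ // checks off the per-cell fast path.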
- Structure* dummyMarkableCellStructure = m_heap->globalData()->dummyMarkableCellStructure.get();
+ ASSERT(blockState != Allocated && blockState != FreeListed);
+ ASSERT(!(!callDestructors && sweepMode == SweepOnly));
+ SamplingRegion samplingRegion((callDestructors && blockState != New) ? "Calling destructors" : "sweeping");
+
+ // This produces a free list that is ordered in reverse through the block.
+ // This is fine, since the allocation code makes no assumptions about the
+ // order of the free list.
+ FreeCell* head = nullptr;
+ size_t count = 0;
for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
- if (m_marks.get(i))
+ if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
continue;
- JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
-#if ENABLE(JSC_ZOMBIES)
- if (cell->structure() && cell->structure() != dummyMarkableCellStructure && !cell->isZombie()) {
- const ClassInfo* info = cell->classInfo();
- cell->~JSCell();
- new (cell) JSZombie(*m_heap->globalData(), info, m_heap->globalData()->zombieStructure.get());
- m_marks.set(i);
+ JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+
+ if (callDestructors && blockState != New)
+ callDestructor(cell);
+
+ if (sweepMode == SweepToFreeList) {
+ FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
+ freeCell->next = head;
+ head = freeCell;
+ ++count;
}
+ }
+
+ // We only want to discard the newlyAllocated bits if we're creating a FreeList;
+ // otherwise we would lose information about what's currently alive.
+ if (sweepMode == SweepToFreeList && m_newlyAllocated)
+ m_newlyAllocated = nullptr;
+
+ m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
+ return FreeList(head, count * cellSize());
+}
+
+MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ m_weakSet.sweep();
+
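+ // A SweepOnly pass exists only to run destructors eagerly, so a block whose
+ // cells don't need destruction has no work to do.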
+ if (sweepMode == SweepOnly && !m_needsDestruction)
+ return FreeList();
+
+ if (m_needsDestruction)
+ return sweepHelper<true>(sweepMode);
+ return sweepHelper<false>(sweepMode);
+}
+
+template<bool callDestructors>
+MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
+{
+ switch (m_state) {
+ case New:
+ ASSERT(sweepMode == SweepToFreeList);
+ return specializedSweep<New, SweepToFreeList, callDestructors>();
+ case FreeListed:
+ // Happens when a block transitions to fully allocated.
+ ASSERT(sweepMode == SweepToFreeList);
+ return FreeList();
+ case Retired:
+ case Allocated:
+ RELEASE_ASSERT_NOT_REACHED();
+ return FreeList();
+ case Marked:
+ return sweepMode == SweepToFreeList
+ ? specializedSweep<Marked, SweepToFreeList, callDestructors>()
+ : specializedSweep<Marked, SweepOnly, callDestructors>();
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return FreeList();
+}
+
+class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
+public:
+ SetNewlyAllocatedFunctor(MarkedBlock* block)
+ : m_block(block)
+ {
+ }
+
+ IterationStatus operator()(JSCell* cell)
+ {
+ ASSERT(MarkedBlock::blockFor(cell) == m_block);
+ m_block->setNewlyAllocated(cell);
+ return IterationStatus::Continue;
+ }
+
+private:
+ MarkedBlock* m_block;
+};
+
+void MarkedBlock::stopAllocating(const FreeList& freeList)
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ FreeCell* head = freeList.head;
+
+ if (m_state == Marked) {
+ // If the block is in the Marked state then we know that:
+ // 1) It was not used for allocation during the previous allocation cycle.
+ // 2) It may have dead objects, and we only know them to be dead by the
+ // fact that their mark bits are unset.
+ // Hence if the block is Marked we need to leave it Marked.
+
+ ASSERT(!head);
+ return;
+ }
+
+ ASSERT(m_state == FreeListed);
+
+ // Roll back to a coherent state for Heap introspection. Cells newly
+ // allocated from our free list are not currently marked, so we need another
+ // way to tell what's live vs dead.
+
+ ASSERT(!m_newlyAllocated);
+ m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();
+
+ SetNewlyAllocatedFunctor functor(this);
+ forEachCell(functor);
+
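+ // Cells still on the free list were never handed out, so they must not carry
+ // the newly-allocated (live) bit; zapping them also tells future sweeps that
+ // no destructor needs to run.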
+ FreeCell* next;
+ for (FreeCell* current = head; current; current = next) {
+ next = current->next;
+ reinterpret_cast<JSCell*>(current)->zap();
+ clearNewlyAllocated(current);
+ }
+
+ m_state = Marked;
+}
+
+void MarkedBlock::clearMarks()
+{
+#if ENABLE(GGC)
+ if (heap()->operationInProgress() == JSC::EdenCollection)
+ this->clearMarksWithCollectionType<EdenCollection>();
+ else
+ this->clearMarksWithCollectionType<FullCollection>();
#else
- cell->~JSCell();
- new (cell) JSCell(*m_heap->globalData(), dummyMarkableCellStructure);
+ this->clearMarksWithCollectionType<FullCollection>();
#endif
+}
+
+template <HeapOperation collectionType>
+void MarkedBlock::clearMarksWithCollectionType()
+{
+ ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ ASSERT(m_state != New && m_state != FreeListed);
+ if (collectionType == FullCollection) {
+ m_marks.clearAll();
+ // This will become true at the end of the mark phase. We set it now to
+ // avoid an extra pass to do so later.
+ m_state = Marked;
+ return;
}
+
+ ASSERT(collectionType == EdenCollection);
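+ // In an Eden collection the existing mark bits still describe the live old
+ // generation, so we deliberately leave them intact.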
+ // If a block was retired then there's no way an EdenCollection can un-retire it.
+ if (m_state != Retired)
+ m_state = Marked;
+}
+
+void MarkedBlock::lastChanceToFinalize()
+{
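+ // Heap teardown path: clear all liveness information so the final sweep
+ // below runs the destructor for every remaining cell.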
+ m_weakSet.lastChanceToFinalize();
+
+ clearNewlyAllocated();
+ clearMarksWithCollectionType<FullCollection>();
+ sweep();
+}
+
+MarkedBlock::FreeList MarkedBlock::resumeAllocating()
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+ ASSERT(m_state == Marked);
+
+ if (!m_newlyAllocated) {
+ // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
+ // when we last stopped allocation, so return an empty free list and stay in the Marked state.
+ return FreeList();
+ }
+
+ // Re-create our free list from before stopping allocation.
+ return sweep(SweepToFreeList);
+}
+
+void MarkedBlock::didRetireBlock(const FreeList& freeList)
+{
+ HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ FreeCell* head = freeList.head;
+
+ // Currently we don't notify the Heap that we're giving up on this block.
+ // The Heap might be able to make a better decision about how many bytes should
+ // be allocated before the next collection if it knew about this retired block.
+ // On the other hand we'll waste at most 10% of our Heap space between FullCollections
+ // and only under heavy fragmentation.
+
+ // We need to zap the free list when retiring a block so that we don't try to destroy
+ // previously destroyed objects when we re-sweep the block in the future.
+ FreeCell* next;
+ for (FreeCell* current = head; current; current = next) {
+ next = current->next;
+ reinterpret_cast<JSCell*>(current)->zap();
+ }
+
+ ASSERT(m_state == FreeListed);
+ m_state = Retired;
}
} // namespace JSC