#include "JSCell.h"
#include "JSObject.h"
-#include "JSZombie.h"
#include "ScopeChain.h"
namespace JSC {
-MarkedBlock* MarkedBlock::create(JSGlobalData* globalData, size_t cellSize)
+MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
-    return new (allocation.base()) MarkedBlock(allocation, globalData, cellSize);
+    return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction);
+}
+
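+// Re-initializes an already-allocated block in place, reusing its page
+// allocation for a new cell size instead of returning the memory to the OS.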
+MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
+{
+    return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize, cellsNeedDestruction);
}
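+// Cell destructors now run during sweeping (see callDestructor below), so
+// destroying a block only releases its page allocation.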
void MarkedBlock::destroy(MarkedBlock* block)
{
-    for (size_t i = block->firstAtom(); i < block->m_endAtom; i += block->m_atomsPerCell)
-        reinterpret_cast<JSCell*>(&block->atoms()[i])->~JSCell();
    block->m_allocation.deallocate();
}
-MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, JSGlobalData* globalData, size_t cellSize)
-    : m_nextAtom(firstAtom())
-    , m_allocation(allocation)
-    , m_heap(&globalData->heap)
-    , m_prev(0)
-    , m_next(0)
+MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
+    : HeapBlock(allocation)
+    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
+    , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
+    , m_cellsNeedDestruction(cellsNeedDestruction)
+    , m_state(New) // All cells start out unmarked.
+    , m_heap(heap)
+{
+    ASSERT(heap);
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+}
+
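+// Runs a cell's destructor at most once: cells are zapped after destruction,
+// and already-zapped cells are skipped here.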
+inline void MarkedBlock::callDestructor(JSCell* cell)
{
-    m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
-    m_endAtom = atomsPerBlock - m_atomsPerCell + 1;
+    // A previous eager sweep may already have run cell's destructor.
+    if (cell->isZapped())
+        return;
+
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
+#endif
+    cell->methodTable()->destroy(cell);
-    Structure* dummyMarkableCellStructure = globalData->dummyMarkableCellStructure.get();
-    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
-        new (&atoms()[i]) JSCell(*globalData, dummyMarkableCellStructure, JSCell::CreatingEarlyCell);
+    cell->zap();
}
-void MarkedBlock::sweep()
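+// Sweeping is specialized on the block's starting state, the sweep mode, and
+// whether destructors need to run, so the compiler can fold away the per-cell
+// checks that don't apply to a given combination.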
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded>
+MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
-    Structure* dummyMarkableCellStructure = m_heap->globalData()->dummyMarkableCellStructure.get();
+    ASSERT(blockState != Allocated && blockState != FreeListed);
+    ASSERT(destructorCallNeeded || sweepMode != SweepOnly);
+    // This produces a free list that is ordered in reverse through the block.
+    // This is fine, since the allocation code makes no assumptions about the
+    // order of the free list.
+    FreeCell* head = 0;
+    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
-        if (m_marks.get(i))
+        if (blockState == Marked && m_marks.get(i))
            continue;
-        JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
-#if ENABLE(JSC_ZOMBIES)
-        if (cell->structure() && cell->structure() != dummyMarkableCellStructure && !cell->isZombie()) {
-            const ClassInfo* info = cell->classInfo();
-            cell->~JSCell();
-            new (cell) JSZombie(*m_heap->globalData(), info, m_heap->globalData()->zombieStructure.get());
-            m_marks.set(i);
+        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+        if (blockState == Zapped && !cell->isZapped())
+            continue;
+
+        if (destructorCallNeeded && blockState != New)
+            callDestructor(cell);
+
+        if (sweepMode == SweepToFreeList) {
+            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
+            freeCell->next = head;
+            head = freeCell;
+            ++count;
        }
-#else
-        cell->~JSCell();
-        new (cell) JSCell(*m_heap->globalData(), dummyMarkableCellStructure);
-#endif
    }
+
+    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Zapped);
+    return FreeList(head, count * cellSize());
+}
+
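+// Either builds a free list of dead cells (SweepToFreeList) or just runs
+// destructors (SweepOnly); blocks whose cells need no destruction can skip a
+// destructor-only sweep entirely.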
+MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
+{
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+
+    if (sweepMode == SweepOnly && !m_cellsNeedDestruction)
+        return FreeList();
+
+    if (m_cellsNeedDestruction)
+        return sweepHelper<true>(sweepMode);
+    return sweepHelper<false>(sweepMode);
+}
+
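+// Dispatches to the specializedSweep instantiation that matches the block's
+// current state.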
+template<bool destructorCallNeeded>
+MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
+{
+    switch (m_state) {
+    case New:
+        ASSERT(sweepMode == SweepToFreeList);
+        return specializedSweep<New, SweepToFreeList, destructorCallNeeded>();
+    case FreeListed:
+        // Happens when a block transitions to fully allocated.
+        ASSERT(sweepMode == SweepToFreeList);
+        return FreeList();
+    case Allocated:
+        ASSERT_NOT_REACHED();
+        return FreeList();
+    case Marked:
+        return sweepMode == SweepToFreeList
+            ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>()
+            : specializedSweep<Marked, SweepOnly, destructorCallNeeded>();
+    case Zapped:
+        return sweepMode == SweepToFreeList
+            ? specializedSweep<Zapped, SweepToFreeList, destructorCallNeeded>()
+            : specializedSweep<Zapped, SweepOnly, destructorCallNeeded>();
+    }
+
+    ASSERT_NOT_REACHED();
+    return FreeList();
+}
+
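+// Restores a state that Heap introspection can understand: cells still
+// sitting on the free list are zapped so they read as dead.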
+void MarkedBlock::zapFreeList(const FreeList& freeList)
+{
+    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+    FreeCell* head = freeList.head;
+
+    if (m_state == Marked) {
+        // If the block is in the Marked state then we know that:
+        // 1) It was not used for allocation during the previous allocation cycle.
+        // 2) It may have dead objects, and we only know them to be dead by the
+        //    fact that their mark bits are unset.
+        // Hence if the block is Marked we need to leave it Marked.
+
+        ASSERT(!head);
+
+        return;
+    }
+
+    if (m_state == Zapped) {
+        // If the block is in the Zapped state then we know that someone already
+        // zapped it for us. This could not have happened during a GC, but might
+        // be the result of someone having done a GC scan to perform some operation
+        // over all live objects (or all live blocks). It also means that somebody
+        // had allocated in this block since the last GC, swept all dead objects
+        // onto the free list, left the block in the FreeListed state, then the heap
+        // scan happened, and canonicalized the block, leading to all dead objects
+        // being zapped. Therefore, it is safe for us to simply do nothing, since
+        // dead objects will have 0 in their vtables and live objects will have
+        // non-zero vtables, which is consistent with the block being zapped.
+
+        ASSERT(!head);
+
+        return;
+    }
+
+    ASSERT(m_state == FreeListed);
+
+    // Roll back to a coherent state for Heap introspection. Cells newly
+    // allocated from our free list are not currently marked, so we need another
+    // way to tell what's live vs dead. We use zapping for that.
+
+    FreeCell* next;
+    for (FreeCell* current = head; current; current = next) {
+        next = current->next;
+        reinterpret_cast<JSCell*>(current)->zap();
+    }
+
+    m_state = Zapped;
}
} // namespace JSC