diff --git a/heap/Heap.cpp b/heap/Heap.cpp
index e93bfb44fa971aa389503bad691bdd9163647f4f..08a09d15a9128301c122be3c3db61121c3b38ded 100644
--- a/heap/Heap.cpp
+++ b/heap/Heap.cpp
@@ -1,5 +1,5 @@
 /*
- *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
  *
  *  This library is free software; you can redistribute it and/or
 
 #include "CodeBlock.h"
 #include "ConservativeRoots.h"
+#include "CopiedSpace.h"
+#include "CopiedSpaceInlines.h"
+#include "CopyVisitorInlines.h"
+#include "DFGWorklist.h"
+#include "DelayedReleaseScope.h"
+#include "EdenGCActivityCallback.h"
+#include "FullGCActivityCallback.h"
 #include "GCActivityCallback.h"
+#include "GCIncomingRefCountedSetInlines.h"
+#include "HeapIterationScope.h"
+#include "HeapRootVisitor.h"
+#include "HeapStatistics.h"
+#include "IncrementalSweeper.h"
 #include "Interpreter.h"
-#include "JSGlobalData.h"
 #include "JSGlobalObject.h"
 #include "JSLock.h"
 #include "JSONObject.h"
+#include "JSCInlines.h"
+#include "JSVirtualMachineInternal.h"
+#include "RecursiveAllocationScope.h"
 #include "Tracing.h"
+#include "UnlinkedCodeBlock.h"
+#include "VM.h"
+#include "WeakSetInlines.h"
 #include <algorithm>
-
-#define COLLECT_ON_EVERY_SLOW_ALLOCATION 0
+#include <wtf/CurrentTime.h>
+#include <wtf/ProcessID.h>
+#include <wtf/RAMSize.h>
 
 using namespace std;
+using namespace JSC;
 
 namespace JSC {
 
-const size_t minBytesPerCycle = 512 * 1024;
+namespace { 
+
+static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
+static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
+
+#define ENABLE_GC_LOGGING 0
+
+#if ENABLE(GC_LOGGING)
+#if COMPILER(CLANG)
+#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
+_Pragma("clang diagnostic push") \
+_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
+_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
+static type name arguments; \
+_Pragma("clang diagnostic pop")
+#else
+#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
+static type name arguments;
+#endif // COMPILER(CLANG)
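+
+// DEFINE_GC_LOGGING_GLOBAL suppresses Clang's -Wglobal-constructors and
+// -Wexit-time-destructors: each GCPHASE / GCCOUNTER site defines a static
+// GCTimer or GCCounter whose destructor runs at process exit to log the
+// accumulated data, which is acceptable for this debug-only instrumentation.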
+
+struct GCTimer {
+    GCTimer(const char* name)
+        : m_name(name)
+    {
+    }
+    ~GCTimer()
+    {
+        logData(m_allCollectionData, "(All)");
+        logData(m_edenCollectionData, "(Eden)");
+        logData(m_fullCollectionData, "(Full)");
+    }
 
-Heap::Heap(JSGlobalData* globalData)
-    : m_operationInProgress(NoOperation)
-    , m_markedSpace(globalData)
-    , m_markListSet(0)
-    , m_activityCallback(DefaultGCActivityCallback::create(this))
-    , m_globalData(globalData)
-    , m_machineThreads(this)
-    , m_markStack(globalData->jsArrayVPtr)
-    , m_handleHeap(globalData)
-    , m_extraCost(0)
+    struct TimeRecord {
+        TimeRecord()
+            : m_time(0)
+            , m_min(std::numeric_limits<double>::infinity())
+            , m_max(0)
+            , m_count(0)
+        {
+        }
+
+        double m_time;
+        double m_min;
+        double m_max;
+        size_t m_count;
+    };
+
+    void logData(const TimeRecord& data, const char* extra)
+    {
+        if (!data.m_count)
+            return; // Avoid a divide-by-zero average for phases that never ran.
+        dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %zu)\n",
+            getCurrentProcessID(),
+            m_name, extra,
+            data.m_time * 1000,
+            data.m_time * 1000 / data.m_count,
+            data.m_min * 1000,
+            data.m_max * 1000,
+            data.m_count);
+    }
+
+    void updateData(TimeRecord& data, double duration)
+    {
+        if (duration < data.m_min)
+            data.m_min = duration;
+        if (duration > data.m_max)
+            data.m_max = duration;
+        data.m_count++;
+        data.m_time += duration;
+    }
+
+    void didFinishPhase(HeapOperation collectionType, double duration)
+    {
+        TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
+        updateData(data, duration);
+        updateData(m_allCollectionData, duration);
+    }
+
+    TimeRecord m_allCollectionData;
+    TimeRecord m_fullCollectionData;
+    TimeRecord m_edenCollectionData;
+    const char* m_name;
+};
+
+struct GCTimerScope {
+    GCTimerScope(GCTimer* timer, HeapOperation collectionType)
+        : m_timer(timer)
+        , m_start(WTF::monotonicallyIncreasingTime())
+        , m_collectionType(collectionType)
+    {
+    }
+    ~GCTimerScope()
+    {
+        double delta = WTF::monotonicallyIncreasingTime() - m_start;
+        m_timer->didFinishPhase(m_collectionType, delta);
+    }
+    GCTimer* m_timer;
+    double m_start;
+    HeapOperation m_collectionType;
+};
+
+struct GCCounter {
+    GCCounter(const char* name)
+        : m_name(name)
+        , m_count(0)
+        , m_total(0)
+        , m_min(std::numeric_limits<size_t>::max())
+        , m_max(0)
+    {
+    }
+    
+    void count(size_t amount)
+    {
+        m_count++;
+        m_total += amount;
+        if (amount < m_min)
+            m_min = amount;
+        if (amount > m_max)
+            m_max = amount;
+    }
+    ~GCCounter()
+    {
+        if (!m_count)
+            return; // Avoid dividing by zero when nothing was counted.
+        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
+    }
+    const char* m_name;
+    size_t m_count;
+    size_t m_total;
+    size_t m_min;
+    size_t m_max;
+};
+
+#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
+#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
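+
+// For example, GCPHASE(MarkRoots) defines a function-local static GCTimer
+// named MarkRootsTimer and opens a MarkRootsTimerScope on the stack, so the
+// elapsed time of the enclosing block is recorded against the current
+// collection type (Eden or Full) when the scope is destroyed.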
+    
+#else
+
+#define GCPHASE(name) do { } while (false)
+#define GCCOUNTER(name, value) do { } while (false)
+#endif
+
+static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
 {
-    m_markedSpace.setHighWaterMark(minBytesPerCycle);
-    (*m_activityCallback)();
+    if (heapType == LargeHeap)
+        return min(largeHeapSize, ramSize / 4);
+    return smallHeapSize;
 }
 
-Heap::~Heap()
+static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
 {
-    // The destroy function must already have been called, so assert this.
-    ASSERT(!m_globalData);
+    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
+    if (heapSize < ramSize / 4)
+        return 2 * heapSize;
+    if (heapSize < ramSize / 2)
+        return 1.5 * heapSize;
+    return 1.25 * heapSize;
 }
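+
+// For example, with 1 GB of RAM: a 100 MB heap (under ramSize / 4) may grow
+// to 200 MB before the next full collection, a 300 MB heap (under
+// ramSize / 2) to 450 MB, and a 600 MB heap only to 750 MB.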
 
-void Heap::destroy()
+static inline bool isValidSharedInstanceThreadState(VM* vm)
 {
-    JSLock lock(SilenceAssertionsOnly);
+    return vm->currentThreadIsHoldingAPILock();
+}
 
-    if (!m_globalData)
-        return;
+static inline bool isValidThreadState(VM* vm)
+{
+    if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
+        return false;
 
-    ASSERT(!m_globalData->dynamicGlobalObject);
-    ASSERT(m_operationInProgress == NoOperation);
-    
-    // The global object is not GC protected at this point, so sweeping may delete it
-    // (and thus the global data) before other objects that may use the global data.
-    RefPtr<JSGlobalData> protect(m_globalData);
+    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
+        return false;
 
-#if ENABLE(JIT)
-    m_globalData->jitStubs->clearHostFunctionStubs();
+    return true;
+}
+
+struct MarkObject : public MarkedBlock::VoidFunctor {
+    void operator()(JSCell* cell)
+    {
+        if (cell->isZapped())
+            return;
+        Heap::heap(cell)->setMarked(cell);
+    }
+};
+
+struct Count : public MarkedBlock::CountFunctor {
+    void operator()(JSCell*) { count(1); }
+};
+
+struct CountIfGlobalObject : MarkedBlock::CountFunctor {
+    void operator()(JSCell* cell)
+    {
+        if (!cell->isObject())
+            return;
+        if (!asObject(cell)->isGlobalObject())
+            return;
+        count(1);
+    }
+};
+
+class RecordType {
+public:
+    typedef PassOwnPtr<TypeCountSet> ReturnType;
+
+    RecordType();
+    void operator()(JSCell*);
+    ReturnType returnValue();
+
+private:
+    const char* typeName(JSCell*);
+    OwnPtr<TypeCountSet> m_typeCountSet;
+};
+
+inline RecordType::RecordType()
+    : m_typeCountSet(adoptPtr(new TypeCountSet))
+{
+}
+
+inline const char* RecordType::typeName(JSCell* cell)
+{
+    const ClassInfo* info = cell->classInfo();
+    if (!info || !info->className)
+        return "[unknown]";
+    return info->className;
+}
+
+inline void RecordType::operator()(JSCell* cell)
+{
+    m_typeCountSet->add(typeName(cell));
+}
+
+inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
+{
+    return m_typeCountSet.release();
+}
+
+} // anonymous namespace
+
+Heap::Heap(VM* vm, HeapType heapType)
+    : m_heapType(heapType)
+    , m_ramSize(ramSize())
+    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
+    , m_sizeAfterLastCollect(0)
+    , m_sizeAfterLastFullCollect(0)
+    , m_sizeBeforeLastFullCollect(0)
+    , m_sizeAfterLastEdenCollect(0)
+    , m_sizeBeforeLastEdenCollect(0)
+    , m_bytesAllocatedThisCycle(0)
+    , m_bytesAbandonedSinceLastFullCollect(0)
+    , m_maxEdenSize(m_minBytesPerCycle)
+    , m_maxHeapSize(m_minBytesPerCycle)
+    , m_shouldDoFullCollection(false)
+    , m_totalBytesVisited(0)
+    , m_totalBytesCopied(0)
+    , m_operationInProgress(NoOperation)
+    , m_blockAllocator()
+    , m_objectSpace(this)
+    , m_storageSpace(this)
+    , m_extraMemoryUsage(0)
+    , m_machineThreads(this)
+    , m_sharedData(vm)
+    , m_slotVisitor(m_sharedData)
+    , m_copyVisitor(m_sharedData)
+    , m_handleSet(vm)
+    , m_codeBlocks(m_blockAllocator)
+    , m_isSafeToCollect(false)
+    , m_writeBarrierBuffer(256)
+    , m_vm(vm)
+    // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
+    // schedule the timer if we've never done a collection.
+    , m_lastFullGCLength(0.01)
+    , m_lastEdenGCLength(0.01)
+    , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
+    , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
+#if ENABLE(GGC)
+    , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
+#else
+    , m_edenActivityCallback(m_fullActivityCallback)
 #endif
+    , m_sweeper(IncrementalSweeper::create(this))
+    , m_deferralDepth(0)
+{
+    m_storageSpace.init();
+}
 
-    delete m_markListSet;
-    m_markListSet = 0;
-    m_markedSpace.clearMarks();
-    m_handleHeap.finalizeWeakHandles();
-    m_markedSpace.destroy();
+Heap::~Heap()
+{
+}
 
-    m_globalData = 0;
+bool Heap::isPagedOut(double deadline)
+{
+    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
+}
+
+// The VM is being destroyed and the collector will never run again.
+// Run all pending finalizers now because we won't get another chance.
+void Heap::lastChanceToFinalize()
+{
+    RELEASE_ASSERT(!m_vm->entryScope);
+    RELEASE_ASSERT(m_operationInProgress == NoOperation);
+
+    m_objectSpace.lastChanceToFinalize();
 }
 
 void Heap::reportExtraMemoryCostSlowCase(size_t cost)
@@ -101,38 +368,36 @@ void Heap::reportExtraMemoryCostSlowCase(size_t cost)
     // if a large value survives one garbage collection, there is not much
     // point in collecting more frequently as long as it stays alive.
 
-    if (m_extraCost > maxExtraCost && m_extraCost > m_markedSpace.highWaterMark() / 2)
-        collectAllGarbage();
-    m_extraCost += cost;
+    didAllocate(cost);
+    collectIfNecessaryOrDefer();
 }
 
-void* Heap::allocateSlowCase(size_t bytes)
+void Heap::reportAbandonedObjectGraph()
 {
-    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
-    ASSERT(JSLock::lockCount() > 0);
-    ASSERT(JSLock::currentThreadIsHoldingLock());
-    ASSERT(bytes <= MarkedSpace::maxCellSize);
-    ASSERT(m_operationInProgress == NoOperation);
-
-#if COLLECT_ON_EVERY_SLOW_ALLOCATION
-    collectAllGarbage();
-    ASSERT(m_operationInProgress == NoOperation);
-#endif
-
-    reset(DoNotSweep);
-
-    m_operationInProgress = Allocation;
-    void* result = m_markedSpace.allocate(bytes);
-    m_operationInProgress = NoOperation;
+    // Our clients don't know exactly how much memory they
+    // are abandoning, so we just guess for them.
+    double abandonedBytes = 0.1 * m_sizeAfterLastCollect;
+
+    // We want to accelerate the next collection. Because memory has just 
+    // been abandoned, the next collection has the potential to 
+    // be more profitable. Since allocation is the trigger for collection, 
+    // we hasten the next collection by pretending that we've allocated more memory. 
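+    // For example, if 50 MB survived the last collection, we report 5 MB as
+    // allocated, which brings the next collection forward.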
+    didAbandon(abandonedBytes);
+}
 
-    ASSERT(result);
-    return result;
+void Heap::didAbandon(size_t bytes)
+{
+    if (m_fullActivityCallback) {
+        m_fullActivityCallback->didAllocate(
+            m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
+    }
+    m_bytesAbandonedSinceLastFullCollect += bytes;
 }
 
 void Heap::protect(JSValue k)
 {
     ASSERT(k);
-    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
+    ASSERT(m_vm->currentThreadIsHoldingAPILock());
 
     if (!k.isCell())
         return;
@@ -143,7 +408,7 @@ void Heap::protect(JSValue k)
 bool Heap::unprotect(JSValue k)
 {
     ASSERT(k);
-    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
+    ASSERT(m_vm->currentThreadIsHoldingAPILock());
 
     if (!k.isCell())
         return false;
@@ -151,305 +416,974 @@ bool Heap::unprotect(JSValue k)
     return m_protectedValues.remove(k.asCell());
 }
 
-void Heap::markProtectedObjects(HeapRootVisitor& heapRootMarker)
+void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
 {
-    ProtectCountSet::iterator end = m_protectedValues.end();
-    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
-        heapRootMarker.mark(&it->first);
+    if (m_arrayBuffers.addReference(cell, buffer)) {
+        collectIfNecessaryOrDefer();
+        didAllocate(buffer->gcSizeEstimateInBytes());
+    }
 }
 
-void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
+void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
 {
     m_tempSortingVectors.append(tempVector);
 }
 
-void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
+void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
 {
     ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
     m_tempSortingVectors.removeLast();
 }
-    
-void Heap::markTempSortVectors(HeapRootVisitor& heapRootMarker)
+
+void Heap::harvestWeakReferences()
+{
+    m_slotVisitor.harvestWeakReferences();
+}
+
+void Heap::finalizeUnconditionalFinalizers()
 {
-    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;
+    GCPHASE(FinalizeUnconditionalFinalizers);
+    m_slotVisitor.finalizeUnconditionalFinalizers();
+}
 
-    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
-    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
-        Vector<ValueStringPair>* tempSortingVector = *it;
+inline JSStack& Heap::stack()
+{
+    return m_vm->interpreter->stack();
+}
 
-        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
-        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
-            if (vectorIt->first)
-                heapRootMarker.mark(&vectorIt->first);
-        }
-    }
+void Heap::willStartIterating()
+{
+    m_objectSpace.willStartIterating();
 }
 
-inline RegisterFile& Heap::registerFile()
+void Heap::didFinishIterating()
 {
-    return m_globalData->interpreter->registerFile();
+    m_objectSpace.didFinishIterating();
 }
 
 void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
 {
-#ifndef NDEBUG
-    if (m_globalData->isSharedInstance()) {
-        ASSERT(JSLock::lockCount() > 0);
-        ASSERT(JSLock::currentThreadIsHoldingLock());
-    }
-#endif
-    if (m_operationInProgress != NoOperation)
-        CRASH();
-    m_operationInProgress = Collection;
-    ConservativeRoots registerFileRoots(this);
-    registerFile().gatherConservativeRoots(registerFileRoots);
-    size_t registerFileRootCount = registerFileRoots.size();
-    JSCell** registerRoots = registerFileRoots.roots();
-    for (size_t i = 0; i < registerFileRootCount; i++) {
+    ASSERT(isValidThreadState(m_vm));
+    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
+    stack().gatherConservativeRoots(stackRoots);
+    size_t stackRootCount = stackRoots.size();
+    JSCell** registerRoots = stackRoots.roots();
+    for (size_t i = 0; i < stackRootCount; i++) {
         setMarked(registerRoots[i]);
+        registerRoots[i]->setMarked();
         roots.add(registerRoots[i]);
     }
-    m_operationInProgress = NoOperation;
 }
 
-void Heap::markRoots()
+void Heap::markRoots(double gcStartTime)
 {
-#ifndef NDEBUG
-    if (m_globalData->isSharedInstance()) {
-        ASSERT(JSLock::lockCount() > 0);
-        ASSERT(JSLock::currentThreadIsHoldingLock());
-    }
+    SamplingRegion samplingRegion("Garbage Collection: Marking");
+
+    GCPHASE(MarkRoots);
+    ASSERT(isValidThreadState(m_vm));
+
+#if ENABLE(GGC)
+    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
+    m_slotVisitor.markStack().fillVector(rememberedSet);
+#else
+    Vector<const JSCell*> rememberedSet;
 #endif
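+
+    // Under GGC, the cells in this vector were appended to the mark stack by
+    // the write barrier (see addToRememberedSet()); we snapshot them here so
+    // clearRememberedSet() can reset their remembered bits after marking.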
 
+    if (m_operationInProgress == EdenCollection)
+        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
+    else
+        m_codeBlocks.clearMarksForFullCollection();
+
+    // We gather conservative roots before clearing mark bits because conservative
+    // gathering uses the mark bits to determine whether a reference is valid.
     void* dummy;
+    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
+    gatherStackRoots(conservativeRoots, &dummy);
+    gatherJSStackRoots(conservativeRoots);
+    gatherScratchBufferRoots(conservativeRoots);
+
+    sanitizeStackForVM(m_vm);
+
+    clearLivenessData();
+
+    m_sharedData.didStartMarking();
+    m_slotVisitor.didStartMarking();
+    HeapRootVisitor heapRootVisitor(m_slotVisitor);
+
+    {
+        ParallelModeEnabler enabler(m_slotVisitor);
+
+        visitExternalRememberedSet();
+        visitSmallStrings();
+        visitConservativeRoots(conservativeRoots);
+        visitProtectedObjects(heapRootVisitor);
+        visitTempSortVectors(heapRootVisitor);
+        visitArgumentBuffers(heapRootVisitor);
+        visitException(heapRootVisitor);
+        visitStrongHandles(heapRootVisitor);
+        visitHandleStack(heapRootVisitor);
+        traceCodeBlocksAndJITStubRoutines();
+        converge();
+    }
 
-    ASSERT(m_operationInProgress == NoOperation);
-    if (m_operationInProgress != NoOperation)
-        CRASH();
+    // Weak references must be marked last because their liveness depends on
+    // the liveness of the rest of the object graph.
+    visitWeakHandles(heapRootVisitor);
 
-    m_operationInProgress = Collection;
+    clearRememberedSet(rememberedSet);
+    m_sharedData.didFinishMarking();
+    updateObjectCounts(gcStartTime);
+    resetVisitors();
+}
 
-    MarkStack& visitor = m_markStack;
-    HeapRootVisitor heapRootMarker(visitor);
-    
-    // We gather conservative roots before clearing mark bits because
-    // conservative gathering uses the mark bits from our last mark pass to
-    // determine whether a reference is valid.
-    ConservativeRoots machineThreadRoots(this);
-    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
+void Heap::copyBackingStores()
+{
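+    // The template parameter lets CopiedSpace limit the copy phase to the
+    // blocks belonging to the generation being collected; the exact block
+    // selection is an assumption about CopiedSpace internals, but an Eden
+    // collection should only need to evacuate newly allocated blocks.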
+    if (m_operationInProgress == EdenCollection)
+        m_storageSpace.startedCopying<EdenCollection>();
+    else {
+        ASSERT(m_operationInProgress == FullCollection);
+        m_storageSpace.startedCopying<FullCollection>();
+    }
 
-    ConservativeRoots registerFileRoots(this);
-    registerFile().gatherConservativeRoots(registerFileRoots);
+    if (m_storageSpace.shouldDoCopyPhase()) {
+        m_sharedData.didStartCopying();
+        m_copyVisitor.startCopying();
+        m_copyVisitor.copyFromShared();
+        m_copyVisitor.doneCopying();
+        // We need to wait for everybody to finish and return their CopiedBlocks 
+        // before signaling that the phase is complete.
+        m_storageSpace.doneCopying();
+        m_sharedData.didFinishCopying();
+    } else
+        m_storageSpace.doneCopying();
+}
 
-    m_markedSpace.clearMarks();
+void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy)
+{
+    GCPHASE(GatherStackRoots);
+    m_jitStubRoutines.clearMarks();
+    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy);
+}
 
-    visitor.append(machineThreadRoots);
-    visitor.drain();
+void Heap::gatherJSStackRoots(ConservativeRoots& roots)
+{
+#if !ENABLE(JIT)
+    GCPHASE(GatherJSStackRoots);
+    stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
+#else
+    UNUSED_PARAM(roots);
+#endif
+}
 
-    visitor.append(registerFileRoots);
-    visitor.drain();
+void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
+{
+#if ENABLE(DFG_JIT)
+    GCPHASE(GatherScratchBufferRoots);
+    m_vm->gatherConservativeRoots(roots);
+#else
+    UNUSED_PARAM(roots);
+#endif
+}
 
-    markProtectedObjects(heapRootMarker);
-    visitor.drain();
-    
-    markTempSortVectors(heapRootMarker);
-    visitor.drain();
-
-    if (m_markListSet && m_markListSet->size())
-        MarkedArgumentBuffer::markLists(heapRootMarker, *m_markListSet);
-    if (m_globalData->exception)
-        heapRootMarker.mark(&m_globalData->exception);
-    visitor.drain();
-
-    m_handleHeap.markStrongHandles(heapRootMarker);
-    visitor.drain();
-
-    m_handleStack.mark(heapRootMarker);
-    visitor.drain();
-
-    // Mark the small strings cache as late as possible, since it will clear
-    // itself if nothing else has marked it.
-    // FIXME: Change the small strings cache to use Weak<T>.
-    m_globalData->smallStrings.visitChildren(heapRootMarker);
-    visitor.drain();
-    
-    // Weak handles must be marked last, because their owners use the set of
-    // opaque roots to determine reachability.
-    int lastOpaqueRootCount;
-    do {
-        lastOpaqueRootCount = visitor.opaqueRootCount();
-        m_handleHeap.markWeakHandles(heapRootMarker);
-        visitor.drain();
-    // If the set of opaque roots has grown, more weak handles may have become reachable.
-    } while (lastOpaqueRootCount != visitor.opaqueRootCount());
-
-    visitor.reset();
+void Heap::clearLivenessData()
+{
+    GCPHASE(ClearLivenessData);
+    m_objectSpace.clearNewlyAllocated();
+    m_objectSpace.clearMarks();
+}
 
-    m_operationInProgress = NoOperation;
+void Heap::visitExternalRememberedSet()
+{
+#if JSC_OBJC_API_ENABLED
+    scanExternalRememberedSet(*m_vm, m_slotVisitor);
+#endif
 }
 
-size_t Heap::objectCount() const
+void Heap::visitSmallStrings()
 {
-    return m_markedSpace.objectCount();
+    GCPHASE(VisitSmallStrings);
+    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Small strings:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
 }
 
-size_t Heap::size() const
+void Heap::visitConservativeRoots(ConservativeRoots& roots)
 {
-    return m_markedSpace.size();
+    GCPHASE(VisitConservativeRoots);
+    m_slotVisitor.append(roots);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Conservative Roots:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
 }
 
-size_t Heap::capacity() const
+void Heap::visitCompilerWorklistWeakReferences()
 {
-    return m_markedSpace.capacity();
+#if ENABLE(DFG_JIT)
+    for (auto worklist : m_suspendedCompilerWorklists)
+        worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("DFG Worklists:\n", m_slotVisitor);
+#endif
 }
 
-size_t Heap::globalObjectCount()
+void Heap::removeDeadCompilerWorklistEntries()
 {
-    return m_globalData->globalObjectCount;
+#if ENABLE(DFG_JIT)
+    GCPHASE(FinalizeDFGWorklists);
+    for (auto worklist : m_suspendedCompilerWorklists)
+        worklist->removeDeadPlans(*m_vm);
+#endif
 }
 
-size_t Heap::protectedGlobalObjectCount()
+void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
+{
+    GCPHASE(VisitProtectedObjects);
+
+    for (auto& pair : m_protectedValues)
+        heapRootVisitor.visit(&pair.key);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Protected Objects:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
 {
-    size_t count = m_handleHeap.protectedGlobalObjectCount();
+    GCPHASE(VisitTempSortVectors);
+    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>*> VectorOfValueStringVectors;
 
-    ProtectCountSet::iterator end = m_protectedValues.end();
-    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
-        if (it->first->isObject() && asObject(it->first)->isGlobalObject())
-            count++;
+    for (auto* vector : m_tempSortingVectors) {
+        for (auto& valueStringPair : *vector) {
+            if (valueStringPair.first)
+                heapRootVisitor.visit(&valueStringPair.first);
+        }
     }
 
-    return count;
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Temp Sort Vectors:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
 }
 
-size_t Heap::protectedObjectCount()
+void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
 {
-    return m_protectedValues.size();
+    GCPHASE(MarkingArgumentBuffers);
+    if (!m_markListSet || !m_markListSet->size())
+        return;
+
+    MarkedArgumentBuffer::markLists(visitor, *m_markListSet);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Argument Buffers:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
 }
 
-class TypeCounter {
-public:
-    TypeCounter();
-    void operator()(JSCell*);
-    PassOwnPtr<TypeCountSet> take();
-    
-private:
-    const char* typeName(JSCell*);
-    OwnPtr<TypeCountSet> m_typeCountSet;
-    HashSet<JSCell*> m_cells;
-};
+void Heap::visitException(HeapRootVisitor& visitor)
+{
+    GCPHASE(MarkingException);
+    if (!m_vm->exception())
+        return;
 
-inline TypeCounter::TypeCounter()
-    : m_typeCountSet(adoptPtr(new TypeCountSet))
+    visitor.visit(m_vm->addressOfException());
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Exceptions:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
+}
+
+void Heap::visitStrongHandles(HeapRootVisitor& visitor)
 {
+    GCPHASE(VisitStrongHandles);
+    m_handleSet.visitStrongHandles(visitor);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Strong Handles:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
 }
 
-inline const char* TypeCounter::typeName(JSCell* cell)
+void Heap::visitHandleStack(HeapRootVisitor& visitor)
 {
-    if (cell->isString())
-        return "string";
-    if (cell->isGetterSetter())
-        return "Getter-Setter";
-    if (cell->isAPIValueWrapper())
-        return "API wrapper";
-    if (cell->isPropertyNameIterator())
-        return "For-in iterator";
-    if (const ClassInfo* info = cell->classInfo())
-        return info->className;
-    if (!cell->isObject())
-        return "[empty cell]";
-    return "Object";
+    GCPHASE(VisitHandleStack);
+    m_handleStack.visit(visitor);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Handle Stack:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
 }
 
-inline void TypeCounter::operator()(JSCell* cell)
+void Heap::traceCodeBlocksAndJITStubRoutines()
 {
-    if (!m_cells.add(cell).second)
-        return;
-    m_typeCountSet->add(typeName(cell));
+    GCPHASE(TraceCodeBlocksAndJITStubRoutines);
+    m_codeBlocks.traceMarked(m_slotVisitor);
+    m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);
+
+    m_slotVisitor.donateAndDrain();
 }
 
-inline PassOwnPtr<TypeCountSet> TypeCounter::take()
+void Heap::converge()
 {
-    return m_typeCountSet.release();
+#if ENABLE(PARALLEL_GC)
+    GCPHASE(Convergence);
+    m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
+#endif
+}
+
+void Heap::visitWeakHandles(HeapRootVisitor& visitor)
+{
+    GCPHASE(VisitingLiveWeakHandles);
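+    // Iterate to a fixed point: visiting weak sets can mark new objects,
+    // which can in turn make more weak handles and code blocks live. We stop
+    // once a pass leaves the visitor's mark stack empty.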
+    while (true) {
+        m_objectSpace.visitWeakSets(visitor);
+        harvestWeakReferences();
+        visitCompilerWorklistWeakReferences();
+        m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
+        if (m_slotVisitor.isEmpty())
+            break;
+
+        if (Options::logGC() == GCLogging::Verbose)
+            dataLog("Live Weak Handles:\n", m_slotVisitor);
+
+        {
+            ParallelModeEnabler enabler(m_slotVisitor);
+            m_slotVisitor.donateAndDrain();
+#if ENABLE(PARALLEL_GC)
+            m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
+#endif
+        }
+    }
+}
+
+void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
+{
+#if ENABLE(GGC)
+    GCPHASE(ClearRememberedSet);
+    for (auto* cell : rememberedSet) {
+        MarkedBlock::blockFor(cell)->clearRemembered(cell);
+        const_cast<JSCell*>(cell)->setRemembered(false);
+    }
+#else
+    UNUSED_PARAM(rememberedSet);
+#endif
+}
+
+void Heap::updateObjectCounts(double gcStartTime)
+{
+    GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());
+
+    if (Options::logGC() == GCLogging::Verbose) {
+        size_t visitCount = m_slotVisitor.visitCount();
+#if ENABLE(PARALLEL_GC)
+        visitCount += m_sharedData.childVisitCount();
+#endif
+        dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
+    }
+
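+    // An Eden collection visits only a subset of the heap, so its counts are
+    // added to the running totals; a full collection revisits everything, so
+    // the totals are replaced outright.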
+    if (m_operationInProgress == EdenCollection) {
+        m_totalBytesVisited += m_slotVisitor.bytesVisited();
+        m_totalBytesCopied += m_slotVisitor.bytesCopied();
+    } else {
+        ASSERT(m_operationInProgress == FullCollection);
+        m_totalBytesVisited = m_slotVisitor.bytesVisited();
+        m_totalBytesCopied = m_slotVisitor.bytesCopied();
+    }
+#if ENABLE(PARALLEL_GC)
+    m_totalBytesVisited += m_sharedData.childBytesVisited();
+    m_totalBytesCopied += m_sharedData.childBytesCopied();
+#endif
+}
+
+void Heap::resetVisitors()
+{
+    m_slotVisitor.reset();
+#if ENABLE(PARALLEL_GC)
+    m_sharedData.resetChildren();
+#endif
+    m_sharedData.reset();
+}
+
+size_t Heap::objectCount()
+{
+    return m_objectSpace.objectCount();
+}
+
+size_t Heap::extraSize()
+{
+    return m_extraMemoryUsage + m_arrayBuffers.size();
+}
+
+size_t Heap::size()
+{
+    return m_objectSpace.size() + m_storageSpace.size() + extraSize();
+}
+
+size_t Heap::capacity()
+{
+    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
+}
+
+size_t Heap::sizeAfterCollect()
+{
+    // The result here may not agree with the normal Heap::size(), because we
+    // count only live copied bytes rather than all used (including dead)
+    // copied bytes. Hence m_totalBytesCopied <= m_storageSpace.size().
+    ASSERT(m_totalBytesCopied <= m_storageSpace.size());
+    return m_totalBytesVisited + m_totalBytesCopied + extraSize();
+}
+
+size_t Heap::protectedGlobalObjectCount()
+{
+    return forEachProtectedCell<CountIfGlobalObject>();
+}
+
+size_t Heap::globalObjectCount()
+{
+    HeapIterationScope iterationScope(*this);
+    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
+}
+
+size_t Heap::protectedObjectCount()
+{
+    return forEachProtectedCell<Count>();
 }
 
 PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
 {
-    TypeCounter typeCounter;
+    return forEachProtectedCell<RecordType>();
+}
 
-    ProtectCountSet::iterator end = m_protectedValues.end();
-    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
-        typeCounter(it->first);
-    m_handleHeap.protectedObjectTypeCounts(typeCounter);
+PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
+{
+    HeapIterationScope iterationScope(*this);
+    return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
+}
 
-    return typeCounter.take();
+void Heap::deleteAllCompiledCode()
+{
+    // If JavaScript is running, it's not safe to delete code, since we'll end
+    // up deleting code that is live on the stack.
+    if (m_vm->entryScope)
+        return;
+    
+    // If we have things on any worklist, then don't delete code. This is kind of
+    // a weird heuristic. It's definitely not safe to throw away code that is on
+    // the worklist. But this change was made in a hurry, so we just avoid throwing
+    // away any code if there is any code on any worklist. I suspect that this
+    // might not actually be too dumb: if there is code on worklists then that
+    // means that we are running some hot JS code right now. Maybe causing
+    // recompilations isn't a good idea.
+#if ENABLE(DFG_JIT)
+    for (unsigned i = DFG::numberOfWorklists(); i--;) {
+        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
+            if (worklist->isActiveForVM(*vm()))
+                return;
+        }
+    }
+#endif // ENABLE(DFG_JIT)
+
+    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+        if (!current->isFunctionExecutable())
+            continue;
+        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
+    }
+
+    ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
+    m_codeBlocks.clearMarksForFullCollection();
+    m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
 }
 
-void HandleHeap::protectedObjectTypeCounts(TypeCounter& typeCounter)
+void Heap::deleteAllUnlinkedFunctionCode()
 {
-    Node* end = m_strongList.end();
-    for (Node* node = m_strongList.begin(); node != end; node = node->next()) {
-        JSValue value = *node->slot();
-        if (value && value.isCell())
-            typeCounter(value.asCell());
+    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+        if (!current->isFunctionExecutable())
+            continue;
+        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
     }
 }
 
-PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
+void Heap::clearUnmarkedExecutables()
+{
+    GCPHASE(ClearUnmarkedExecutables);
+    ExecutableBase* next;
+    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
+        next = current->next();
+        if (isMarked(current))
+            continue;
+
+        // We do this because executable memory is limited on some platforms and because
+        // CodeBlock requires eager finalization.
+        ExecutableBase::clearCodeVirtual(current);
+        m_compiledCode.remove(current);
+    }
+}
+
+void Heap::deleteUnmarkedCompiledCode()
+{
+    GCPHASE(DeleteCodeBlocks);
+    clearUnmarkedExecutables();
+    m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
+    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
+}
+
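+// A remembered cell is an old-generation cell that may reference new-space
+// objects. Marking it remembered and appending it to the slot visitor keeps
+// it on the mark stack, so the next Eden collection re-scans it; markRoots()
+// snapshots that stack into its rememberedSet vector and clearRememberedSet()
+// resets the bits afterwards.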
+void Heap::addToRememberedSet(const JSCell* cell)
 {
-    TypeCounter typeCounter;
-    forEach(typeCounter);
-    return typeCounter.take();
+    ASSERT(cell);
+    ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
+    if (isRemembered(cell))
+        return;
+    MarkedBlock::blockFor(cell)->setRemembered(cell);
+    const_cast<JSCell*>(cell)->setRemembered(true);
+    m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
 }
 
 void Heap::collectAllGarbage()
 {
-    m_markStack.setShouldUnlinkCalls(true);
-    reset(DoSweep);
-    m_markStack.setShouldUnlinkCalls(false);
+    if (!m_isSafeToCollect)
+        return;
+
+    collect(FullCollection);
+
+    SamplingRegion samplingRegion("Garbage Collection: Sweeping");
+    DelayedReleaseScope delayedReleaseScope(m_objectSpace);
+    m_objectSpace.sweep();
+    m_objectSpace.shrink();
 }
 
-void Heap::reset(SweepToggle sweepToggle)
+static const double minute = 60.0;
+
+void Heap::collect(HeapOperation collectionType)
 {
-    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
+#if ENABLE(ALLOCATION_LOGGING)
+    dataLogF("JSC GC starting collection.\n");
+#endif
+    
+    double before = 0;
+    if (Options::logGC()) {
+        dataLog("[GC: ");
+        before = currentTimeMS();
+    }
+    
+    SamplingRegion samplingRegion("Garbage Collection");
+    
+    RELEASE_ASSERT(!m_deferralDepth);
+    ASSERT(vm()->currentThreadIsHoldingAPILock());
+    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
+    ASSERT(m_isSafeToCollect);
     JAVASCRIPTCORE_GC_BEGIN();
+    RELEASE_ASSERT(m_operationInProgress == NoOperation);
+
+    suspendCompilerThreads();
+    willStartCollection(collectionType);
+    GCPHASE(Collect);
 
-    markRoots();
-    m_handleHeap.finalizeWeakHandles();
+    double gcStartTime = WTF::monotonicallyIncreasingTime();
+
+    deleteOldCode(gcStartTime);
+    flushOldStructureIDTables();
+    stopAllocation();
+    flushWriteBarrierBuffer();
+
+    markRoots(gcStartTime);
 
     JAVASCRIPTCORE_GC_MARKED();
 
-    m_markedSpace.reset();
-    m_extraCost = 0;
+    reapWeakHandles();
+    sweepArrayBuffers();
+    snapshotMarkedSpace();
+
+    copyBackingStores();
 
-#if ENABLE(JSC_ZOMBIES)
-    sweepToggle = DoSweep;
+    finalizeUnconditionalFinalizers();
+    removeDeadCompilerWorklistEntries();
+    deleteUnmarkedCompiledCode();
+    deleteSourceProviderCaches();
+    notifyIncrementalSweeper();
+    rememberCurrentlyExecutingCodeBlocks();
+
+    resetAllocators();
+    updateAllocationLimits();
+    didFinishCollection(gcStartTime);
+    resumeCompilerThreads();
+
+    if (Options::logGC()) {
+        double after = currentTimeMS();
+        dataLog(after - before, " ms]\n");
+    }
+}
+
+void Heap::suspendCompilerThreads()
+{
+#if ENABLE(DFG_JIT)
+    GCPHASE(SuspendCompilerThreads);
+    ASSERT(m_suspendedCompilerWorklists.isEmpty());
+    for (unsigned i = DFG::numberOfWorklists(); i--;) {
+        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
+            m_suspendedCompilerWorklists.append(worklist);
+            worklist->suspendAllThreads();
+        }
+    }
 #endif
+}
 
-    if (sweepToggle == DoSweep) {
-        m_markedSpace.sweep();
-        m_markedSpace.shrink();
+void Heap::willStartCollection(HeapOperation collectionType)
+{
+    GCPHASE(StartingCollection);
+    if (shouldDoFullCollection(collectionType)) {
+        m_operationInProgress = FullCollection;
+        m_slotVisitor.clearMarkStack();
+        m_shouldDoFullCollection = false;
+        if (Options::logGC())
+            dataLog("FullCollection, ");
+    } else {
+        m_operationInProgress = EdenCollection;
+        if (Options::logGC())
+            dataLog("EdenCollection, ");
+    }
+    if (m_operationInProgress == FullCollection) {
+        m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
+        m_extraMemoryUsage = 0;
+
+        if (m_fullActivityCallback)
+            m_fullActivityCallback->willCollect();
+    } else {
+        ASSERT(m_operationInProgress == EdenCollection);
+        m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
     }
 
-    // To avoid pathological GC churn in large heaps, we set the allocation high
-    // water mark to be proportional to the current size of the heap. The exact
-    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
-    // new bytes allocated) proportion, and seems to work well in benchmarks.
-    size_t proportionalBytes = 2 * m_markedSpace.size();
-    m_markedSpace.setHighWaterMark(max(proportionalBytes, minBytesPerCycle));
+    if (m_edenActivityCallback)
+        m_edenActivityCallback->willCollect();
+}
 
+void Heap::deleteOldCode(double gcStartTime)
+{
+    if (m_operationInProgress == EdenCollection)
+        return;
+
+    GCPHASE(DeleteOldCode);
+    if (gcStartTime - m_lastCodeDiscardTime > minute) {
+        deleteAllCompiledCode();
+        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
+    }
+}
+
+void Heap::flushOldStructureIDTables()
+{
+    GCPHASE(FlushOldStructureIDTables);
+    m_structureIDTable.flushOldTables();
+}
+
+void Heap::flushWriteBarrierBuffer()
+{
+    GCPHASE(FlushWriteBarrierBuffer);
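+    // An Eden collection must honor pending barriers, so flush the buffered
+    // cells into the remembered set; a full collection scans everything
+    // anyway, so the buffer can simply be discarded.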
+    if (m_operationInProgress == EdenCollection) {
+        m_writeBarrierBuffer.flush(*this);
+        return;
+    }
+    m_writeBarrierBuffer.reset();
+}
+
+void Heap::stopAllocation()
+{
+    GCPHASE(StopAllocation);
+    m_objectSpace.stopAllocating();
+    if (m_operationInProgress == FullCollection)
+        m_storageSpace.didStartFullCollection();
+}
+
+void Heap::reapWeakHandles()
+{
+    GCPHASE(ReapingWeakHandles);
+    m_objectSpace.reapWeakSets();
+}
+
+void Heap::sweepArrayBuffers()
+{
+    GCPHASE(SweepingArrayBuffers);
+    m_arrayBuffers.sweep();
+}
+
+struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
+    MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks) 
+        : m_index(0) 
+        , m_blocks(blocks)
+    {
+    }
+
+    void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
+
+    size_t m_index;
+    Vector<MarkedBlock*>& m_blocks;
+};
+
+void Heap::snapshotMarkedSpace()
+{
+    GCPHASE(SnapshotMarkedSpace);
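+    // Record the block set now so that the incremental sweeper (started in
+    // notifyIncrementalSweeper()) works from a stable snapshot even as the
+    // mutator allocates new blocks after this collection finishes.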
+    if (m_operationInProgress != FullCollection)
+        return;
+
+    m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
+    MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
+    m_objectSpace.forEachBlock(functor);
+}
+
+void Heap::deleteSourceProviderCaches()
+{
+    GCPHASE(DeleteSourceProviderCaches);
+    m_vm->clearSourceProviderCaches();
+}
+
+void Heap::notifyIncrementalSweeper()
+{
+    GCPHASE(NotifyIncrementalSweeper);
+    if (m_operationInProgress != FullCollection)
+        return;
+    m_sweeper->startSweeping(m_blockSnapshot);
+}
+
+void Heap::rememberCurrentlyExecutingCodeBlocks()
+{
+    GCPHASE(RememberCurrentlyExecutingCodeBlocks);
+    m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
+}
+
+void Heap::resetAllocators()
+{
+    GCPHASE(ResetAllocators);
+    m_objectSpace.resetAllocators();
+}
+
+void Heap::updateAllocationLimits()
+{
+    GCPHASE(UpdateAllocationLimits);
+    size_t currentHeapSize = sizeAfterCollect();
+    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
+        HeapStatistics::exitWithFailure();
+
+    if (m_operationInProgress == FullCollection) {
+        // To avoid pathological GC churn in very small and very large heaps, we set
+        // the new allocation limit based on the current size of the heap, with a
+        // fixed minimum.
+        m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
+        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+        m_sizeAfterLastFullCollect = currentHeapSize;
+        m_bytesAbandonedSinceLastFullCollect = 0;
+    } else {
+        ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
+        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+        m_sizeAfterLastEdenCollect = currentHeapSize;
+        double edenToOldGenerationRatio = static_cast<double>(m_maxEdenSize) / static_cast<double>(m_maxHeapSize);
+        double minEdenToOldGenerationRatio = 1.0 / 3.0;
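+        // For example, with m_maxHeapSize = 120 MB and currentHeapSize = 90 MB,
+        // the ratio is 30 / 120 = 0.25 < 1/3, so the next collection is
+        // upgraded to a full collection.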
+        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
+            m_shouldDoFullCollection = true;
+        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
+        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+        if (m_fullActivityCallback) {
+            ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
+            m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
+        }
+    }
+
+    m_sizeAfterLastCollect = currentHeapSize;
+    m_bytesAllocatedThisCycle = 0;
+
+    if (Options::logGC())
+        dataLog(currentHeapSize / 1024, " kb, ");
+}
+
+void Heap::didFinishCollection(double gcStartTime)
+{
+    GCPHASE(FinishingCollection);
+    double gcEndTime = WTF::monotonicallyIncreasingTime();
+    if (m_operationInProgress == FullCollection)
+        m_lastFullGCLength = gcEndTime - gcStartTime;
+    else
+        m_lastEdenGCLength = gcEndTime - gcStartTime;
+
+    if (Options::recordGCPauseTimes())
+        HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
+    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
+
+    m_operationInProgress = NoOperation;
     JAVASCRIPTCORE_GC_END();
 
-    (*m_activityCallback)();
+    if (Options::useZombieMode())
+        zombifyDeadObjects();
+
+    if (Options::objectsAreImmortal())
+        markDeadObjects();
+
+    if (Options::showObjectStatistics())
+        HeapStatistics::showObjectStatistics(this);
+
+    if (Options::logGC() == GCLogging::Verbose)
+        GCLogging::dumpObjectGraph(this);
+}
+
+void Heap::resumeCompilerThreads()
+{
+#if ENABLE(DFG_JIT)
+    GCPHASE(ResumeCompilerThreads);
+    for (auto worklist : m_suspendedCompilerWorklists)
+        worklist->resumeAllThreads();
+    m_suspendedCompilerWorklists.clear();
+#endif
+}
+
+void Heap::markDeadObjects()
+{
+    HeapIterationScope iterationScope(*this);
+    m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
+}
+
+void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
+{
+    m_fullActivityCallback = activityCallback;
+}
+
+void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
+{
+    m_edenActivityCallback = activityCallback;
+}
+
+GCActivityCallback* Heap::fullActivityCallback()
+{
+    return m_fullActivityCallback.get();
 }
 
-void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
+GCActivityCallback* Heap::edenActivityCallback()
 {
-    m_activityCallback = activityCallback;
+    return m_edenActivityCallback.get();
 }
 
-GCActivityCallback* Heap::activityCallback()
+void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
 {
-    return m_activityCallback.get();
+    m_sweeper = sweeper;
+}
+
+IncrementalSweeper* Heap::sweeper()
+{
+    return m_sweeper.get();
+}
+
+void Heap::setGarbageCollectionTimerEnabled(bool enable)
+{
+    if (m_fullActivityCallback)
+        m_fullActivityCallback->setEnabled(enable);
+    if (m_edenActivityCallback)
+        m_edenActivityCallback->setEnabled(enable);
+}
+
+void Heap::didAllocate(size_t bytes)
+{
+    if (m_edenActivityCallback)
+        m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
+    m_bytesAllocatedThisCycle += bytes;
+}
+
+bool Heap::isValidAllocation(size_t)
+{
+    if (!isValidThreadState(m_vm))
+        return false;
+
+    if (m_operationInProgress != NoOperation)
+        return false;
+    
+    return true;
+}
+
+void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
+{
+    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
+}
+
+void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
+{
+    HandleSlot slot = handle.slot();
+    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
+    finalizer(slot->asCell());
+    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
+}
+
+void Heap::addCompiledCode(ExecutableBase* executable)
+{
+    m_compiledCode.append(executable);
+}
+
+class Zombify : public MarkedBlock::VoidFunctor {
+public:
+    void operator()(JSCell* cell)
+    {
+        void** current = reinterpret_cast<void**>(cell);
+
+        // We want to maintain zapped-ness because that's how we know if we've called 
+        // the destructor.
+        if (cell->isZapped())
+            current++;
+
+        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
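+        // Fill the rest of the cell with the zombie poison pattern so stale
+        // references to it are easy to recognize in a debugger.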
+        for (; current < limit; current++)
+            *current = zombifiedBits;
+    }
+};
+
+void Heap::zombifyDeadObjects()
+{
+    // Sweep now because destructors will crash once we're zombified.
+    {
+        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
+        DelayedReleaseScope delayedReleaseScope(m_objectSpace);
+        m_objectSpace.zombifySweep();
+    }
+    HeapIterationScope iterationScope(*this);
+    m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
+}
+
+void Heap::flushWriteBarrierBuffer(JSCell* cell)
+{
+#if ENABLE(GGC)
+    m_writeBarrierBuffer.flush(*this);
+    m_writeBarrierBuffer.add(cell);
+#else
+    UNUSED_PARAM(cell);
+#endif
+}
+
+bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
+{
+#if ENABLE(GGC)
+    if (Options::alwaysDoFullCollection())
+        return true;
+
+    switch (requestedCollectionType) {
+    case EdenCollection:
+        return false;
+    case FullCollection:
+        return true;
+    case AnyCollection:
+        return m_shouldDoFullCollection;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return false;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+    return false;
+#else
+    UNUSED_PARAM(requestedCollectionType);
+    return true;
+#endif
 }
 
 } // namespace JSC