X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/93a3786624b2768d89bfa27e46598dc64e2fb70a..ed1e77d3adeb83d26fd1dfb16dd84cabdcefd250:/heap/Heap.cpp diff --git a/heap/Heap.cpp b/heap/Heap.cpp index bcf2b82..b1f6625 100644 --- a/heap/Heap.cpp +++ b/heap/Heap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved. + * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved. * Copyright (C) 2007 Eric Seidel * * This library is free software; you can redistribute it and/or @@ -26,22 +26,32 @@ #include "CopiedSpace.h" #include "CopiedSpaceInlines.h" #include "CopyVisitorInlines.h" +#include "DFGWorklist.h" +#include "EdenGCActivityCallback.h" +#include "FullGCActivityCallback.h" #include "GCActivityCallback.h" +#include "GCIncomingRefCountedSetInlines.h" +#include "HeapIterationScope.h" #include "HeapRootVisitor.h" #include "HeapStatistics.h" +#include "HeapVerifier.h" #include "IncrementalSweeper.h" #include "Interpreter.h" -#include "VM.h" #include "JSGlobalObject.h" #include "JSLock.h" #include "JSONObject.h" -#include "Operations.h" +#include "JSCInlines.h" +#include "JSVirtualMachineInternal.h" +#include "RecursiveAllocationScope.h" #include "Tracing.h" +#include "TypeProfilerLog.h" #include "UnlinkedCodeBlock.h" +#include "VM.h" #include "WeakSetInlines.h" #include #include #include +#include using namespace std; using namespace JSC; @@ -53,6 +63,8 @@ namespace { static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage. static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache. +#define ENABLE_GC_LOGGING 0 + #if ENABLE(GC_LOGGING) #if COMPILER(CLANG) #define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \ @@ -68,82 +80,129 @@ static type name arguments; struct GCTimer { GCTimer(const char* name) - : m_time(0) - , m_min(100000000) - , m_max(0) - , m_count(0) - , m_name(name) + : name(name) { } ~GCTimer() { - dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000); + logData(allCollectionData, "(All)"); + logData(edenCollectionData, "(Eden)"); + logData(fullCollectionData, "(Full)"); + } + + struct TimeRecord { + TimeRecord() + : time(0) + , min(std::numeric_limits::infinity()) + , max(0) + , count(0) + { + } + + double time; + double min; + double max; + size_t count; + }; + + void logData(const TimeRecord& data, const char* extra) + { + dataLogF("[%d] %s (Parent: %s) %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n", + getCurrentProcessID(), + name, + parent ? parent->name : "nullptr", + extra, + data.time * 1000, + data.time * 1000 / data.count, + data.min * 1000, + data.max * 1000, + data.count); } - double m_time; - double m_min; - double m_max; - size_t m_count; - const char* m_name; + + void updateData(TimeRecord& data, double duration) + { + if (duration < data.min) + data.min = duration; + if (duration > data.max) + data.max = duration; + data.count++; + data.time += duration; + } + + void didFinishPhase(HeapOperation collectionType, double duration) + { + TimeRecord& data = collectionType == EdenCollection ? 
edenCollectionData : fullCollectionData; + updateData(data, duration); + updateData(allCollectionData, duration); + } + + static GCTimer* s_currentGlobalTimer; + + TimeRecord allCollectionData; + TimeRecord fullCollectionData; + TimeRecord edenCollectionData; + const char* name; + GCTimer* parent { nullptr }; }; +GCTimer* GCTimer::s_currentGlobalTimer = nullptr; + struct GCTimerScope { - GCTimerScope(GCTimer* timer) - : m_timer(timer) - , m_start(WTF::currentTime()) + GCTimerScope(GCTimer& timer, HeapOperation collectionType) + : timer(timer) + , start(WTF::monotonicallyIncreasingTime()) + , collectionType(collectionType) { + timer.parent = GCTimer::s_currentGlobalTimer; + GCTimer::s_currentGlobalTimer = &timer; } ~GCTimerScope() { - double delta = WTF::currentTime() - m_start; - if (delta < m_timer->m_min) - m_timer->m_min = delta; - if (delta > m_timer->m_max) - m_timer->m_max = delta; - m_timer->m_count++; - m_timer->m_time += delta; - } - GCTimer* m_timer; - double m_start; + double delta = WTF::monotonicallyIncreasingTime() - start; + timer.didFinishPhase(collectionType, delta); + GCTimer::s_currentGlobalTimer = timer.parent; + } + GCTimer& timer; + double start; + HeapOperation collectionType; }; struct GCCounter { GCCounter(const char* name) - : m_name(name) - , m_count(0) - , m_total(0) - , m_min(10000000) - , m_max(0) + : name(name) + , count(0) + , total(0) + , min(10000000) + , max(0) { } - void count(size_t amount) + void add(size_t amount) { - m_count++; - m_total += amount; - if (amount < m_min) - m_min = amount; - if (amount > m_max) - m_max = amount; + count++; + total += amount; + if (amount < min) + min = amount; + if (amount > max) + max = amount; } ~GCCounter() { - dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max); + dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), name, total, total / count, min, max); } - const char* m_name; - size_t m_count; - size_t m_total; - size_t m_min; - size_t m_max; + const char* name; + size_t count; + size_t total; + size_t min; + size_t max; }; -#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer) -#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? 
&name1##Timer : &name2##Timer) -#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false) +#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(name##Timer, m_operationInProgress) +#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.add(value); } while (false) #else #define GCPHASE(name) do { } while (false) -#define COND_GCPHASE(cond, name1, name2) do { } while (false) #define GCCOUNTER(name, value) do { } while (false) #endif @@ -166,12 +225,12 @@ static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize) static inline bool isValidSharedInstanceThreadState(VM* vm) { - return vm->apiLock().currentThreadIsHoldingLock(); + return vm->currentThreadIsHoldingAPILock(); } static inline bool isValidThreadState(VM* vm) { - if (vm->identifierTable != wtfThreadData().currentIdentifierTable()) + if (vm->atomicStringTable() != wtfThreadData().atomicStringTable()) return false; if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm)) @@ -181,12 +240,17 @@ static inline bool isValidThreadState(VM* vm) } struct MarkObject : public MarkedBlock::VoidFunctor { - void operator()(JSCell* cell) + inline void visit(JSCell* cell) { if (cell->isZapped()) return; Heap::heap(cell)->setMarked(cell); } + IterationStatus operator()(JSCell* cell) + { + visit(cell); + return IterationStatus::Continue; + } }; struct Count : public MarkedBlock::CountFunctor { @@ -194,30 +258,36 @@ struct Count : public MarkedBlock::CountFunctor { }; struct CountIfGlobalObject : MarkedBlock::CountFunctor { - void operator()(JSCell* cell) { + inline void visit(JSCell* cell) + { if (!cell->isObject()) return; if (!asObject(cell)->isGlobalObject()) return; count(1); } + IterationStatus operator()(JSCell* cell) + { + visit(cell); + return IterationStatus::Continue; + } }; class RecordType { public: - typedef PassOwnPtr ReturnType; + typedef std::unique_ptr ReturnType; RecordType(); - void operator()(JSCell*); + IterationStatus operator()(JSCell*); ReturnType returnValue(); private: const char* typeName(JSCell*); - OwnPtr m_typeCountSet; + std::unique_ptr m_typeCountSet; }; inline RecordType::RecordType() - : m_typeCountSet(adoptPtr(new TypeCountSet)) + : m_typeCountSet(std::make_unique()) { } @@ -229,47 +299,78 @@ inline const char* RecordType::typeName(JSCell* cell) return info->className; } -inline void RecordType::operator()(JSCell* cell) +inline IterationStatus RecordType::operator()(JSCell* cell) { m_typeCountSet->add(typeName(cell)); + return IterationStatus::Continue; } -inline PassOwnPtr RecordType::returnValue() +inline std::unique_ptr RecordType::returnValue() { - return m_typeCountSet.release(); + return WTF::move(m_typeCountSet); } } // anonymous namespace Heap::Heap(VM* vm, HeapType heapType) : m_heapType(heapType) - , m_ramSize(ramSize()) + , m_ramSize(Options::forceRAMSize() ? 
Options::forceRAMSize() : ramSize()) , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize)) , m_sizeAfterLastCollect(0) - , m_bytesAllocatedLimit(m_minBytesPerCycle) - , m_bytesAllocated(0) - , m_bytesAbandoned(0) + , m_sizeAfterLastFullCollect(0) + , m_sizeBeforeLastFullCollect(0) + , m_sizeAfterLastEdenCollect(0) + , m_sizeBeforeLastEdenCollect(0) + , m_bytesAllocatedThisCycle(0) + , m_bytesAbandonedSinceLastFullCollect(0) + , m_maxEdenSize(m_minBytesPerCycle) + , m_maxHeapSize(m_minBytesPerCycle) + , m_shouldDoFullCollection(false) + , m_totalBytesVisited(0) + , m_totalBytesCopied(0) , m_operationInProgress(NoOperation) - , m_blockAllocator() , m_objectSpace(this) , m_storageSpace(this) + , m_extraMemorySize(0) + , m_deprecatedExtraMemorySize(0) , m_machineThreads(this) , m_sharedData(vm) , m_slotVisitor(m_sharedData) , m_copyVisitor(m_sharedData) , m_handleSet(vm) , m_isSafeToCollect(false) + , m_writeBarrierBuffer(256) , m_vm(vm) - , m_lastGCLength(0) - , m_lastCodeDiscardTime(WTF::currentTime()) - , m_activityCallback(DefaultGCActivityCallback::create(this)) - , m_sweeper(IncrementalSweeper::create(this)) + // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously + // schedule the timer if we've never done a collection. + , m_lastFullGCLength(0.01) + , m_lastEdenGCLength(0.01) + , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime()) + , m_fullActivityCallback(GCActivityCallback::createFullTimer(this)) +#if ENABLE(GGC) + , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this)) +#else + , m_edenActivityCallback(m_fullActivityCallback) +#endif +#if USE(CF) + , m_sweeper(std::make_unique(this, CFRunLoopGetCurrent())) +#else + , m_sweeper(std::make_unique(this->vm())) +#endif + , m_deferralDepth(0) +#if USE(CF) + , m_delayedReleaseRecursionCount(0) +#endif { m_storageSpace.init(); + if (Options::verifyHeap()) + m_verifier = std::make_unique(this, Options::numberOfGCCyclesToRecordForVerification()); } Heap::~Heap() { + for (WeakBlock* block : m_logicallyEmptyWeakBlocks) + WeakBlock::destroy(block); } bool Heap::isPagedOut(double deadline) @@ -281,40 +382,60 @@ bool Heap::isPagedOut(double deadline) // Run all pending finalizers now because we won't get another chance. void Heap::lastChanceToFinalize() { - RELEASE_ASSERT(!m_vm->dynamicGlobalObject); + RELEASE_ASSERT(!m_vm->entryScope); RELEASE_ASSERT(m_operationInProgress == NoOperation); m_objectSpace.lastChanceToFinalize(); + releaseDelayedReleasedObjects(); + + sweepAllLogicallyEmptyWeakBlocks(); +} + +void Heap::releaseDelayedReleasedObjects() +{ +#if USE(CF) + // We need to guard against the case that releasing an object can create more objects due to the + // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up + // back here and could try to recursively release objects. We guard that with a recursive entry + // count. Only the initial call will release objects, recursive calls simple return and let the + // the initial call to the function take care of any objects created during release time. + // This also means that we need to loop until there are no objects in m_delayedReleaseObjects + // and use a temp Vector for the actual releasing. 
+ if (!m_delayedReleaseRecursionCount++) { + while (!m_delayedReleaseObjects.isEmpty()) { + ASSERT(m_vm->currentThreadIsHoldingAPILock()); + + Vector> objectsToRelease = WTF::move(m_delayedReleaseObjects); -#if ENABLE(SIMPLE_HEAP_PROFILING) - m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts"); - m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts"); + { + // We need to drop locks before calling out to arbitrary code. + JSLock::DropAllLocks dropAllLocks(m_vm); + + objectsToRelease.clear(); + } + } + } + m_delayedReleaseRecursionCount--; #endif } -void Heap::reportExtraMemoryCostSlowCase(size_t cost) +void Heap::reportExtraMemoryAllocatedSlowCase(size_t size) { - // Our frequency of garbage collection tries to balance memory use against speed - // by collecting based on the number of newly created values. However, for values - // that hold on to a great deal of memory that's not in the form of other JS values, - // that is not good enough - in some cases a lot of those objects can pile up and - // use crazy amounts of memory without a GC happening. So we track these extra - // memory costs. Only unusually large objects are noted, and we only keep track - // of this extra cost until the next GC. In garbage collected languages, most values - // are either very short lived temporaries, or have extremely long lifetimes. So - // if a large value survives one garbage collection, there is not much point to - // collecting more frequently as long as it stays alive. + didAllocate(size); + collectIfNecessaryOrDefer(); +} - didAllocate(cost); - if (shouldCollect()) - collect(DoNotSweep); +void Heap::deprecatedReportExtraMemorySlowCase(size_t size) +{ + m_deprecatedExtraMemorySize += size; + reportExtraMemoryAllocatedSlowCase(size); } void Heap::reportAbandonedObjectGraph() { // Our clients don't know exactly how much memory they // are abandoning so we just guess for them. - double abandonedBytes = 0.10 * m_sizeAfterLastCollect; + double abandonedBytes = 0.1 * m_sizeAfterLastCollect; // We want to accelerate the next collection. 
Because memory has just // been abandoned, the next collection has the potential to @@ -325,19 +446,17 @@ void Heap::reportAbandonedObjectGraph() void Heap::didAbandon(size_t bytes) { -#if PLATFORM(IOS) - if (m_activityCallback) - m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned); -#else - m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned); -#endif // PLATFORM(IOS) - m_bytesAbandoned += bytes; + if (m_fullActivityCallback) { + m_fullActivityCallback->didAllocate( + m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect); + } + m_bytesAbandonedSinceLastFullCollect += bytes; } void Heap::protect(JSValue k) { ASSERT(k); - ASSERT(m_vm->apiLock().currentThreadIsHoldingLock()); + ASSERT(m_vm->currentThreadIsHoldingAPILock()); if (!k.isCell()) return; @@ -348,7 +467,7 @@ void Heap::protect(JSValue k) bool Heap::unprotect(JSValue k) { ASSERT(k); - ASSERT(m_vm->apiLock().currentThreadIsHoldingLock()); + ASSERT(m_vm->currentThreadIsHoldingAPILock()); if (!k.isCell()) return false; @@ -356,42 +475,11 @@ bool Heap::unprotect(JSValue k) return m_protectedValues.remove(k.asCell()); } -void Heap::jettisonDFGCodeBlock(PassOwnPtr codeBlock) +void Heap::addReference(JSCell* cell, ArrayBuffer* buffer) { - m_dfgCodeBlocks.jettison(codeBlock); -} - -void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor) -{ - ProtectCountSet::iterator end = m_protectedValues.end(); - for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) - heapRootVisitor.visit(&it->key); -} - -void Heap::pushTempSortVector(Vector* tempVector) -{ - m_tempSortingVectors.append(tempVector); -} - -void Heap::popTempSortVector(Vector* tempVector) -{ - ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last()); - m_tempSortingVectors.removeLast(); -} - -void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor) -{ - typedef Vector* > VectorOfValueStringVectors; - - VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end(); - for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) { - Vector* tempSortingVector = *it; - - Vector::iterator vectorEnd = tempSortingVector->end(); - for (Vector::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) { - if (vectorIt->first) - heapRootVisitor.visit(&vectorIt->first); - } + if (m_arrayBuffers.addReference(cell, buffer)) { + collectIfNecessaryOrDefer(); + didAllocate(buffer->gcSizeEstimateInBytes()); } } @@ -402,6 +490,7 @@ void Heap::harvestWeakReferences() void Heap::finalizeUnconditionalFinalizers() { + GCPHASE(FinalizeUnconditionalFinalizers); m_slotVisitor.finalizeUnconditionalFinalizers(); } @@ -410,9 +499,14 @@ inline JSStack& Heap::stack() return m_vm->interpreter->stack(); } -void Heap::canonicalizeCellLivenessData() +void Heap::willStartIterating() +{ + m_objectSpace.willStartIterating(); +} + +void Heap::didFinishIterating() { - m_objectSpace.canonicalizeCellLivenessData(); + m_objectSpace.didFinishIterating(); } void Heap::getConservativeRegisterRoots(HashSet& roots) @@ -424,214 +518,360 @@ void Heap::getConservativeRegisterRoots(HashSet& roots) JSCell** registerRoots = stackRoots.roots(); for (size_t i = 0; i < stackRootCount; i++) { setMarked(registerRoots[i]); + registerRoots[i]->setMarked(); roots.add(registerRoots[i]); } } -void Heap::markRoots() +void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& 
calleeSavedRegisters) { - SamplingRegion samplingRegion("Garbage Collection: Tracing"); + SamplingRegion samplingRegion("Garbage Collection: Marking"); GCPHASE(MarkRoots); ASSERT(isValidThreadState(m_vm)); -#if ENABLE(OBJECT_MARK_LOGGING) - double gcStartTime = WTF::currentTime(); +#if ENABLE(GGC) + Vector rememberedSet(m_slotVisitor.markStack().size()); + m_slotVisitor.markStack().fillVector(rememberedSet); +#else + Vector rememberedSet; #endif - void* dummy; - + if (m_operationInProgress == EdenCollection) + m_codeBlocks.clearMarksForEdenCollection(rememberedSet); + else + m_codeBlocks.clearMarksForFullCollection(); + // We gather conservative roots before clearing mark bits because conservative // gathering uses the mark bits to determine whether a reference is valid. - ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace); - m_jitStubRoutines.clearMarks(); + ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace); + gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters); + gatherJSStackRoots(conservativeRoots); + gatherScratchBufferRoots(conservativeRoots); + + clearLivenessData(); + + m_sharedData.didStartMarking(); + m_slotVisitor.didStartMarking(); + HeapRootVisitor heapRootVisitor(m_slotVisitor); + { - GCPHASE(GatherConservativeRoots); - m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy); + ParallelModeEnabler enabler(m_slotVisitor); + + visitExternalRememberedSet(); + visitSmallStrings(); + visitConservativeRoots(conservativeRoots); + visitProtectedObjects(heapRootVisitor); + visitArgumentBuffers(heapRootVisitor); + visitException(heapRootVisitor); + visitStrongHandles(heapRootVisitor); + visitHandleStack(heapRootVisitor); + traceCodeBlocksAndJITStubRoutines(); + converge(); } - ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace); - m_dfgCodeBlocks.clearMarks(); - { - GCPHASE(GatherStackRoots); - stack().gatherConservativeRoots( - stackRoots, m_jitStubRoutines, m_dfgCodeBlocks); + // Weak references must be marked last because their liveness depends on + // the liveness of the rest of the object graph. + visitWeakHandles(heapRootVisitor); + + clearRememberedSet(rememberedSet); + m_sharedData.didFinishMarking(); + updateObjectCounts(gcStartTime); + resetVisitors(); +} + +void Heap::copyBackingStores() +{ + GCPHASE(CopyBackingStores); + if (m_operationInProgress == EdenCollection) + m_storageSpace.startedCopying(); + else { + ASSERT(m_operationInProgress == FullCollection); + m_storageSpace.startedCopying(); } + if (m_storageSpace.shouldDoCopyPhase()) { + m_sharedData.didStartCopying(); + m_copyVisitor.startCopying(); + m_copyVisitor.copyFromShared(); + m_copyVisitor.doneCopying(); + // We need to wait for everybody to finish and return their CopiedBlocks + // before signaling that the phase is complete. 
+ m_storageSpace.doneCopying(); + m_sharedData.didFinishCopying(); + } else + m_storageSpace.doneCopying(); +} + +void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters) +{ + GCPHASE(GatherStackRoots); + m_jitStubRoutines.clearMarks(); + m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters); +} + +void Heap::gatherJSStackRoots(ConservativeRoots& roots) +{ +#if !ENABLE(JIT) + GCPHASE(GatherJSStackRoots); + stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks); +#else + UNUSED_PARAM(roots); +#endif +} + +void Heap::gatherScratchBufferRoots(ConservativeRoots& roots) +{ #if ENABLE(DFG_JIT) - ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace); - { - GCPHASE(GatherScratchBufferRoots); - m_vm->gatherConservativeRoots(scratchBufferRoots); - } + GCPHASE(GatherScratchBufferRoots); + m_vm->gatherConservativeRoots(roots); +#else + UNUSED_PARAM(roots); #endif +} - { - GCPHASE(clearMarks); - m_objectSpace.clearMarks(); - } +void Heap::clearLivenessData() +{ + GCPHASE(ClearLivenessData); + m_objectSpace.clearNewlyAllocated(); + m_objectSpace.clearMarks(); +} - m_sharedData.didStartMarking(); - SlotVisitor& visitor = m_slotVisitor; - visitor.setup(); - HeapRootVisitor heapRootVisitor(visitor); +void Heap::visitExternalRememberedSet() +{ +#if JSC_OBJC_API_ENABLED + scanExternalRememberedSet(*m_vm, m_slotVisitor); +#endif +} - { - ParallelModeEnabler enabler(visitor); +void Heap::visitSmallStrings() +{ + GCPHASE(VisitSmallStrings); + if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress)) + return; - if (m_vm->codeBlocksBeingCompiled.size()) { - GCPHASE(VisitActiveCodeBlock); - for (size_t i = 0; i < m_vm->codeBlocksBeingCompiled.size(); i++) - m_vm->codeBlocksBeingCompiled[i]->visitAggregate(visitor); - } + m_vm->smallStrings.visitStrongReferences(m_slotVisitor); + if (Options::logGC() == GCLogging::Verbose) + dataLog("Small strings:\n", m_slotVisitor); + m_slotVisitor.donateAndDrain(); +} - m_vm->smallStrings.visitStrongReferences(visitor); +void Heap::visitConservativeRoots(ConservativeRoots& roots) +{ + GCPHASE(VisitConservativeRoots); + m_slotVisitor.append(roots); - { - GCPHASE(VisitMachineRoots); - MARK_LOG_ROOT(visitor, "C++ Stack"); - visitor.append(machineThreadRoots); - visitor.donateAndDrain(); - } - { - GCPHASE(VisitStackRoots); - MARK_LOG_ROOT(visitor, "Stack"); - visitor.append(stackRoots); - visitor.donateAndDrain(); - } + if (Options::logGC() == GCLogging::Verbose) + dataLog("Conservative Roots:\n", m_slotVisitor); + + m_slotVisitor.donateAndDrain(); +} + +void Heap::visitCompilerWorklistWeakReferences() +{ #if ENABLE(DFG_JIT) - { - GCPHASE(VisitScratchBufferRoots); - MARK_LOG_ROOT(visitor, "Scratch Buffers"); - visitor.append(scratchBufferRoots); - visitor.donateAndDrain(); - } + for (auto worklist : m_suspendedCompilerWorklists) + worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks); + + if (Options::logGC() == GCLogging::Verbose) + dataLog("DFG Worklists:\n", m_slotVisitor); #endif - { - GCPHASE(VisitProtectedObjects); - MARK_LOG_ROOT(visitor, "Protected Objects"); - markProtectedObjects(heapRootVisitor); - visitor.donateAndDrain(); - } - { - GCPHASE(VisitTempSortVectors); - MARK_LOG_ROOT(visitor, "Temp Sort Vectors"); - markTempSortVectors(heapRootVisitor); - visitor.donateAndDrain(); - } +} - { - GCPHASE(MarkingArgumentBuffers); - if (m_markListSet && 
m_markListSet->size()) { - MARK_LOG_ROOT(visitor, "Argument Buffers"); - MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet); - visitor.donateAndDrain(); - } - } - if (m_vm->exception) { - GCPHASE(MarkingException); - MARK_LOG_ROOT(visitor, "Exceptions"); - heapRootVisitor.visit(&m_vm->exception); - visitor.donateAndDrain(); - } - - { - GCPHASE(VisitStrongHandles); - MARK_LOG_ROOT(visitor, "Strong Handles"); - m_handleSet.visitStrongHandles(heapRootVisitor); - visitor.donateAndDrain(); - } - - { - GCPHASE(HandleStack); - MARK_LOG_ROOT(visitor, "Handle Stack"); - m_handleStack.visit(heapRootVisitor); - visitor.donateAndDrain(); - } - - { - GCPHASE(TraceCodeBlocksAndJITStubRoutines); - MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines"); - m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor); - m_jitStubRoutines.traceMarkedStubRoutines(visitor); - visitor.donateAndDrain(); - } - +void Heap::removeDeadCompilerWorklistEntries() +{ +#if ENABLE(DFG_JIT) + GCPHASE(FinalizeDFGWorklists); + for (auto worklist : m_suspendedCompilerWorklists) + worklist->removeDeadPlans(*m_vm); +#endif +} + +void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor) +{ + GCPHASE(VisitProtectedObjects); + + for (auto& pair : m_protectedValues) + heapRootVisitor.visit(&pair.key); + + if (Options::logGC() == GCLogging::Verbose) + dataLog("Protected Objects:\n", m_slotVisitor); + + m_slotVisitor.donateAndDrain(); +} + +void Heap::visitArgumentBuffers(HeapRootVisitor& visitor) +{ + GCPHASE(MarkingArgumentBuffers); + if (!m_markListSet || !m_markListSet->size()) + return; + + MarkedArgumentBuffer::markLists(visitor, *m_markListSet); + + if (Options::logGC() == GCLogging::Verbose) + dataLog("Argument Buffers:\n", m_slotVisitor); + + m_slotVisitor.donateAndDrain(); +} + +void Heap::visitException(HeapRootVisitor& visitor) +{ + GCPHASE(MarkingException); + if (!m_vm->exception() && !m_vm->lastException()) + return; + + visitor.visit(m_vm->addressOfException()); + visitor.visit(m_vm->addressOfLastException()); + + if (Options::logGC() == GCLogging::Verbose) + dataLog("Exceptions:\n", m_slotVisitor); + + m_slotVisitor.donateAndDrain(); +} + +void Heap::visitStrongHandles(HeapRootVisitor& visitor) +{ + GCPHASE(VisitStrongHandles); + m_handleSet.visitStrongHandles(visitor); + + if (Options::logGC() == GCLogging::Verbose) + dataLog("Strong Handles:\n", m_slotVisitor); + + m_slotVisitor.donateAndDrain(); +} + +void Heap::visitHandleStack(HeapRootVisitor& visitor) +{ + GCPHASE(VisitHandleStack); + m_handleStack.visit(visitor); + + if (Options::logGC() == GCLogging::Verbose) + dataLog("Handle Stack:\n", m_slotVisitor); + + m_slotVisitor.donateAndDrain(); +} + +void Heap::traceCodeBlocksAndJITStubRoutines() +{ + GCPHASE(TraceCodeBlocksAndJITStubRoutines); + m_codeBlocks.traceMarked(m_slotVisitor); + m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor); + + if (Options::logGC() == GCLogging::Verbose) + dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor); + + m_slotVisitor.donateAndDrain(); +} + +void Heap::converge() +{ #if ENABLE(PARALLEL_GC) - { - GCPHASE(Convergence); - visitor.drainFromShared(SlotVisitor::MasterDrain); - } + GCPHASE(Convergence); + m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain); #endif - } +} - // Weak references must be marked last because their liveness depends on - // the liveness of the rest of the object graph. 
- { - GCPHASE(VisitingLiveWeakHandles); - MARK_LOG_ROOT(visitor, "Live Weak Handles"); - while (true) { - m_objectSpace.visitWeakSets(heapRootVisitor); - harvestWeakReferences(); - if (visitor.isEmpty()) - break; - { - ParallelModeEnabler enabler(visitor); - visitor.donateAndDrain(); +void Heap::visitWeakHandles(HeapRootVisitor& visitor) +{ + GCPHASE(VisitingLiveWeakHandles); + while (true) { + m_objectSpace.visitWeakSets(visitor); + harvestWeakReferences(); + visitCompilerWorklistWeakReferences(); + m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered. + if (m_slotVisitor.isEmpty()) + break; + + if (Options::logGC() == GCLogging::Verbose) + dataLog("Live Weak Handles:\n", m_slotVisitor); + + { + ParallelModeEnabler enabler(m_slotVisitor); + m_slotVisitor.donateAndDrain(); #if ENABLE(PARALLEL_GC) - visitor.drainFromShared(SlotVisitor::MasterDrain); + m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain); #endif - } } } +} - GCCOUNTER(VisitedValueCount, visitor.visitCount()); +void Heap::clearRememberedSet(Vector& rememberedSet) +{ +#if ENABLE(GGC) + GCPHASE(ClearRememberedSet); + for (auto* cell : rememberedSet) + const_cast(cell)->setRemembered(false); +#else + UNUSED_PARAM(rememberedSet); +#endif +} - m_sharedData.didFinishMarking(); -#if ENABLE(OBJECT_MARK_LOGGING) - size_t visitCount = visitor.visitCount(); +void Heap::updateObjectCounts(double gcStartTime) +{ + GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount()); + + if (Options::logGC() == GCLogging::Verbose) { + size_t visitCount = m_slotVisitor.visitCount(); #if ENABLE(PARALLEL_GC) - visitCount += m_sharedData.childVisitCount(); + visitCount += m_sharedData.childVisitCount(); #endif - MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime); + dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime); + } + + size_t bytesRemovedFromOldSpaceDueToReallocation = + m_storageSpace.takeBytesRemovedFromOldSpaceDueToReallocation(); + + if (m_operationInProgress == FullCollection) { + m_totalBytesVisited = 0; + m_totalBytesCopied = 0; + } else + m_totalBytesCopied -= bytesRemovedFromOldSpaceDueToReallocation; + + m_totalBytesVisited += m_slotVisitor.bytesVisited(); + m_totalBytesCopied += m_slotVisitor.bytesCopied(); +#if ENABLE(PARALLEL_GC) + m_totalBytesVisited += m_sharedData.childBytesVisited(); + m_totalBytesCopied += m_sharedData.childBytesCopied(); #endif +} - visitor.reset(); +void Heap::resetVisitors() +{ + m_slotVisitor.reset(); #if ENABLE(PARALLEL_GC) m_sharedData.resetChildren(); #endif m_sharedData.reset(); } -void Heap::copyBackingStores() +size_t Heap::objectCount() { - m_storageSpace.startedCopying(); - if (m_storageSpace.shouldDoCopyPhase()) { - m_sharedData.didStartCopying(); - m_copyVisitor.startCopying(); - m_copyVisitor.copyFromShared(); - m_copyVisitor.doneCopying(); - // We need to wait for everybody to finish and return their CopiedBlocks - // before signaling that the phase is complete. 
- m_storageSpace.doneCopying(); - m_sharedData.didFinishCopying(); - } else - m_storageSpace.doneCopying(); + return m_objectSpace.objectCount(); } -size_t Heap::objectCount() +size_t Heap::extraMemorySize() { - return m_objectSpace.objectCount(); + return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size(); } size_t Heap::size() { - return m_objectSpace.size() + m_storageSpace.size(); + return m_objectSpace.size() + m_storageSpace.size() + extraMemorySize(); } size_t Heap::capacity() { - return m_objectSpace.capacity() + m_storageSpace.capacity(); + return m_objectSpace.capacity() + m_storageSpace.capacity() + extraMemorySize(); +} + +size_t Heap::sizeAfterCollect() +{ + // The result here may not agree with the normal Heap::size(). + // This is due to the fact that we only count live copied bytes + // rather than all used (including dead) copied bytes, thus it's + // always the case that m_totalBytesCopied <= m_storageSpace.size(). + ASSERT(m_totalBytesCopied <= m_storageSpace.size()); + return m_totalBytesVisited + m_totalBytesCopied + extraMemorySize(); } size_t Heap::protectedGlobalObjectCount() @@ -641,7 +881,8 @@ size_t Heap::protectedGlobalObjectCount() size_t Heap::globalObjectCount() { - return m_objectSpace.forEachLiveCell(); + HeapIterationScope iterationScope(*this); + return m_objectSpace.forEachLiveCell(iterationScope); } size_t Heap::protectedObjectCount() @@ -649,165 +890,417 @@ size_t Heap::protectedObjectCount() return forEachProtectedCell(); } -PassOwnPtr Heap::protectedObjectTypeCounts() +std::unique_ptr Heap::protectedObjectTypeCounts() { return forEachProtectedCell(); } -PassOwnPtr Heap::objectTypeCounts() +std::unique_ptr Heap::objectTypeCounts() { - return m_objectSpace.forEachLiveCell(); + HeapIterationScope iterationScope(*this); + return m_objectSpace.forEachLiveCell(iterationScope); } void Heap::deleteAllCompiledCode() { // If JavaScript is running, it's not safe to delete code, since we'll end // up deleting code that is live on the stack. - if (m_vm->dynamicGlobalObject) + if (m_vm->entryScope) return; + + // If we have things on any worklist, then don't delete code. This is kind of + // a weird heuristic. It's definitely not safe to throw away code that is on + // the worklist. But this change was made in a hurry so we just avoid throwing + // away any code if there is any code on any worklist. I suspect that this + // might not actually be too dumb: if there is code on worklists then that + // means that we are running some hot JS code right now. Maybe causing + // recompilations isn't a good idea. 
+#if ENABLE(DFG_JIT) + for (unsigned i = DFG::numberOfWorklists(); i--;) { + if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) { + if (worklist->isActiveForVM(*vm())) + return; + } + } +#endif // ENABLE(DFG_JIT) - for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) { + for (ExecutableBase* current : m_compiledCode) { if (!current->isFunctionExecutable()) continue; - static_cast(current)->clearCodeIfNotCompiling(); + static_cast(current)->clearCode(); } - m_dfgCodeBlocks.clearMarks(); - m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks(); + ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation); + m_codeBlocks.clearMarksForFullCollection(); + m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection); } -void Heap::deleteUnmarkedCompiledCode() +void Heap::deleteAllUnlinkedFunctionCode() { - ExecutableBase* next; - for (ExecutableBase* current = m_compiledCode.head(); current; current = next) { - next = current->next(); + for (ExecutableBase* current : m_compiledCode) { + if (!current->isFunctionExecutable()) + continue; + static_cast(current)->clearUnlinkedCodeForRecompilation(); + } +} + +void Heap::clearUnmarkedExecutables() +{ + GCPHASE(ClearUnmarkedExecutables); + for (unsigned i = m_compiledCode.size(); i--;) { + ExecutableBase* current = m_compiledCode[i]; if (isMarked(current)) continue; // We do this because executable memory is limited on some platforms and because // CodeBlock requires eager finalization. ExecutableBase::clearCodeVirtual(current); - m_compiledCode.remove(current); + std::swap(m_compiledCode[i], m_compiledCode.last()); + m_compiledCode.removeLast(); } +} - m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks(); +void Heap::deleteUnmarkedCompiledCode() +{ + GCPHASE(DeleteCodeBlocks); + clearUnmarkedExecutables(); + m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress); m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines(); } -void Heap::collectAllGarbage() +void Heap::addToRememberedSet(const JSCell* cell) +{ + ASSERT(cell); + ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread()); + if (isRemembered(cell)) + return; + const_cast(cell)->setRemembered(true); + m_slotVisitor.unconditionallyAppend(const_cast(cell)); +} + +void Heap::collectAndSweep(HeapOperation collectionType) { if (!m_isSafeToCollect) return; - collect(DoSweep); + collect(collectionType); + + SamplingRegion samplingRegion("Garbage Collection: Sweeping"); + + DeferGCForAWhile deferGC(*this); + m_objectSpace.sweep(); + m_objectSpace.shrink(); + + sweepAllLogicallyEmptyWeakBlocks(); } static double minute = 60.0; -void Heap::collect(SweepToggle sweepToggle) +NEVER_INLINE void Heap::collect(HeapOperation collectionType) { + void* stackTop; + ALLOCATE_AND_GET_REGISTER_STATE(registers); + + collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers); + + sanitizeStackForVM(m_vm); +} + +NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters) +{ +#if ENABLE(ALLOCATION_LOGGING) + dataLogF("JSC GC starting collection.\n"); +#endif + + double before = 0; + if (Options::logGC()) { + dataLog("[GC: "); + before = currentTimeMS(); + } + SamplingRegion samplingRegion("Garbage Collection"); - GCPHASE(Collect); - ASSERT(vm()->apiLock().currentThreadIsHoldingLock()); - RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable()); + if (vm()->typeProfiler()) { + 
DeferGCForAWhile awhile(*this); + vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC")); + } + + RELEASE_ASSERT(!m_deferralDepth); + ASSERT(vm()->currentThreadIsHoldingAPILock()); + RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable()); ASSERT(m_isSafeToCollect); JAVASCRIPTCORE_GC_BEGIN(); RELEASE_ASSERT(m_operationInProgress == NoOperation); - m_operationInProgress = Collection; -#if PLATFORM(IOS) - if (m_activityCallback) - m_activityCallback->willCollect(); -#else - m_activityCallback->willCollect(); -#endif // PLATFORM(IOS) + suspendCompilerThreads(); + willStartCollection(collectionType); + GCPHASE(Collect); - double lastGCStartTime = WTF::currentTime(); - if (lastGCStartTime - m_lastCodeDiscardTime > minute) { - deleteAllCompiledCode(); - m_lastCodeDiscardTime = WTF::currentTime(); - } + double gcStartTime = WTF::monotonicallyIncreasingTime(); + if (m_verifier) { + // Verify that live objects from the last GC cycle haven't been corrupted by + // mutators before we begin this new GC cycle. + m_verifier->verify(HeapVerifier::Phase::BeforeGC); - { - GCPHASE(Canonicalize); - m_objectSpace.canonicalizeCellLivenessData(); + m_verifier->initializeGCCycle(); + m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking); } - markRoots(); - - { - GCPHASE(ReapingWeakHandles); - m_objectSpace.reapWeakSets(); - } + deleteOldCode(gcStartTime); + flushOldStructureIDTables(); + stopAllocation(); + flushWriteBarrierBuffer(); - JAVASCRIPTCORE_GC_MARKED(); + markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters); - { - m_blockSnapshot.resize(m_objectSpace.blocks().set().size()); - MarkedBlockSnapshotFunctor functor(m_blockSnapshot); - m_objectSpace.forEachBlock(functor); + if (m_verifier) { + m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking); + m_verifier->verify(HeapVerifier::Phase::AfterMarking); } + JAVASCRIPTCORE_GC_MARKED(); + + if (vm()->typeProfiler()) + vm()->typeProfiler()->invalidateTypeSetCache(); + + reapWeakHandles(); + pruneStaleEntriesFromWeakGCMaps(); + sweepArrayBuffers(); + snapshotMarkedSpace(); copyBackingStores(); - { - GCPHASE(FinalizeUnconditionalFinalizers); - finalizeUnconditionalFinalizers(); + finalizeUnconditionalFinalizers(); + removeDeadCompilerWorklistEntries(); + deleteUnmarkedCompiledCode(); + deleteSourceProviderCaches(); + notifyIncrementalSweeper(); + rememberCurrentlyExecutingCodeBlocks(); + + resetAllocators(); + updateAllocationLimits(); + didFinishCollection(gcStartTime); + resumeCompilerThreads(); + + if (m_verifier) { + m_verifier->trimDeadObjects(); + m_verifier->verify(HeapVerifier::Phase::AfterGC); } - { - GCPHASE(finalizeSmallStrings); - m_vm->smallStrings.finalizeSmallStrings(); + if (Options::logGC()) { + double after = currentTimeMS(); + dataLog(after - before, " ms]\n"); } +} - { - GCPHASE(DeleteCodeBlocks); - deleteUnmarkedCompiledCode(); +void Heap::suspendCompilerThreads() +{ +#if ENABLE(DFG_JIT) + GCPHASE(SuspendCompilerThreads); + ASSERT(m_suspendedCompilerWorklists.isEmpty()); + for (unsigned i = DFG::numberOfWorklists(); i--;) { + if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) { + m_suspendedCompilerWorklists.append(worklist); + worklist->suspendAllThreads(); + } } +#endif +} - { - GCPHASE(DeleteSourceProviderCaches); - m_vm->clearSourceProviderCaches(); +void Heap::willStartCollection(HeapOperation collectionType) +{ + GCPHASE(StartingCollection); + if (shouldDoFullCollection(collectionType)) { + m_operationInProgress = FullCollection; + 
m_slotVisitor.clearMarkStack(); + m_shouldDoFullCollection = false; + if (Options::logGC()) + dataLog("FullCollection, "); + } else { + m_operationInProgress = EdenCollection; + if (Options::logGC()) + dataLog("EdenCollection, "); + } + if (m_operationInProgress == FullCollection) { + m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle; + m_extraMemorySize = 0; + m_deprecatedExtraMemorySize = 0; + + if (m_fullActivityCallback) + m_fullActivityCallback->willCollect(); + } else { + ASSERT(m_operationInProgress == EdenCollection); + m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle; } - if (sweepToggle == DoSweep) { - SamplingRegion samplingRegion("Garbage Collection: Sweeping"); - GCPHASE(Sweeping); - m_objectSpace.sweep(); - m_objectSpace.shrink(); + if (m_edenActivityCallback) + m_edenActivityCallback->willCollect(); +} + +void Heap::deleteOldCode(double gcStartTime) +{ + if (m_operationInProgress == EdenCollection) + return; + + GCPHASE(DeleteOldCode); + if (gcStartTime - m_lastCodeDiscardTime > minute) { + deleteAllCompiledCode(); + m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime(); + } +} + +void Heap::flushOldStructureIDTables() +{ + GCPHASE(FlushOldStructureIDTables); + m_structureIDTable.flushOldTables(); +} + +void Heap::flushWriteBarrierBuffer() +{ + GCPHASE(FlushWriteBarrierBuffer); + if (m_operationInProgress == EdenCollection) { + m_writeBarrierBuffer.flush(*this); + return; } + m_writeBarrierBuffer.reset(); +} + +void Heap::stopAllocation() +{ + GCPHASE(StopAllocation); + m_objectSpace.stopAllocating(); + if (m_operationInProgress == FullCollection) + m_storageSpace.didStartFullCollection(); +} + +void Heap::reapWeakHandles() +{ + GCPHASE(ReapingWeakHandles); + m_objectSpace.reapWeakSets(); +} + +void Heap::pruneStaleEntriesFromWeakGCMaps() +{ + GCPHASE(PruningStaleEntriesFromWeakGCMaps); + if (m_operationInProgress != FullCollection) + return; + for (auto& pruneCallback : m_weakGCMaps.values()) + pruneCallback(); +} - m_sweeper->startSweeping(m_blockSnapshot); - m_bytesAbandoned = 0; +void Heap::sweepArrayBuffers() +{ + GCPHASE(SweepingArrayBuffers); + m_arrayBuffers.sweep(); +} +struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor { + MarkedBlockSnapshotFunctor(Vector& blocks) + : m_index(0) + , m_blocks(blocks) { - GCPHASE(ResetAllocators); - m_objectSpace.resetAllocators(); } - - size_t currentHeapSize = size(); + + void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; } + + size_t m_index; + Vector& m_blocks; +}; + +void Heap::snapshotMarkedSpace() +{ + GCPHASE(SnapshotMarkedSpace); + + if (m_operationInProgress == EdenCollection) { + m_blockSnapshot.appendVector(m_objectSpace.blocksWithNewObjects()); + // Sort and deduplicate the block snapshot since we might be appending to an unfinished work list. 
+ std::sort(m_blockSnapshot.begin(), m_blockSnapshot.end()); + m_blockSnapshot.shrink(std::unique(m_blockSnapshot.begin(), m_blockSnapshot.end()) - m_blockSnapshot.begin()); + } else { + m_blockSnapshot.resizeToFit(m_objectSpace.blocks().set().size()); + MarkedBlockSnapshotFunctor functor(m_blockSnapshot); + m_objectSpace.forEachBlock(functor); + } +} + +void Heap::deleteSourceProviderCaches() +{ + GCPHASE(DeleteSourceProviderCaches); + m_vm->clearSourceProviderCaches(); +} + +void Heap::notifyIncrementalSweeper() +{ + GCPHASE(NotifyIncrementalSweeper); + + if (m_operationInProgress == FullCollection) { + if (!m_logicallyEmptyWeakBlocks.isEmpty()) + m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0; + } + + m_sweeper->startSweeping(); +} + +void Heap::rememberCurrentlyExecutingCodeBlocks() +{ + GCPHASE(RememberCurrentlyExecutingCodeBlocks); + m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this); +} + +void Heap::resetAllocators() +{ + GCPHASE(ResetAllocators); + m_objectSpace.resetAllocators(); +} + +void Heap::updateAllocationLimits() +{ + GCPHASE(UpdateAllocationLimits); + size_t currentHeapSize = sizeAfterCollect(); if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize()) HeapStatistics::exitWithFailure(); + if (m_operationInProgress == FullCollection) { + // To avoid pathological GC churn in very small and very large heaps, we set + // the new allocation limit based on the current size of the heap, with a + // fixed minimum. + m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize)); + m_maxEdenSize = m_maxHeapSize - currentHeapSize; + m_sizeAfterLastFullCollect = currentHeapSize; + m_bytesAbandonedSinceLastFullCollect = 0; + } else { + ASSERT(currentHeapSize >= m_sizeAfterLastCollect); + m_maxEdenSize = m_maxHeapSize - currentHeapSize; + m_sizeAfterLastEdenCollect = currentHeapSize; + double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize; + double minEdenToOldGenerationRatio = 1.0 / 3.0; + if (edenToOldGenerationRatio < minEdenToOldGenerationRatio) + m_shouldDoFullCollection = true; + m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect; + m_maxEdenSize = m_maxHeapSize - currentHeapSize; + if (m_fullActivityCallback) { + ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect); + m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect); + } + } + m_sizeAfterLastCollect = currentHeapSize; + m_bytesAllocatedThisCycle = 0; - // To avoid pathological GC churn in very small and very large heaps, we set - // the new allocation limit based on the current size of the heap, with a - // fixed minimum. 
- size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize)); - m_bytesAllocatedLimit = maxHeapSize - currentHeapSize; + if (Options::logGC()) + dataLog(currentHeapSize / 1024, " kb, "); +} - m_bytesAllocated = 0; - double lastGCEndTime = WTF::currentTime(); - m_lastGCLength = lastGCEndTime - lastGCStartTime; +void Heap::didFinishCollection(double gcStartTime) +{ + GCPHASE(FinishingCollection); + double gcEndTime = WTF::monotonicallyIncreasingTime(); + if (m_operationInProgress == FullCollection) + m_lastFullGCLength = gcEndTime - gcStartTime; + else + m_lastEdenGCLength = gcEndTime - gcStartTime; if (Options::recordGCPauseTimes()) - HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime); - RELEASE_ASSERT(m_operationInProgress == Collection); - - m_operationInProgress = NoOperation; - JAVASCRIPTCORE_GC_END(); + HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime); if (Options::useZombieMode()) zombifyDeadObjects(); @@ -817,29 +1310,55 @@ void Heap::collect(SweepToggle sweepToggle) if (Options::showObjectStatistics()) HeapStatistics::showObjectStatistics(this); + + if (Options::logGC() == GCLogging::Verbose) + GCLogging::dumpObjectGraph(this); + + RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection); + m_operationInProgress = NoOperation; + JAVASCRIPTCORE_GC_END(); +} + +void Heap::resumeCompilerThreads() +{ +#if ENABLE(DFG_JIT) + GCPHASE(ResumeCompilerThreads); + for (auto worklist : m_suspendedCompilerWorklists) + worklist->resumeAllThreads(); + m_suspendedCompilerWorklists.clear(); +#endif } void Heap::markDeadObjects() { - m_objectSpace.forEachDeadCell(); + HeapIterationScope iterationScope(*this); + m_objectSpace.forEachDeadCell(iterationScope); +} + +void Heap::setFullActivityCallback(PassRefPtr activityCallback) +{ + m_fullActivityCallback = activityCallback; +} + +void Heap::setEdenActivityCallback(PassRefPtr activityCallback) +{ + m_edenActivityCallback = activityCallback; } -void Heap::setActivityCallback(PassOwnPtr activityCallback) +GCActivityCallback* Heap::fullActivityCallback() { - m_activityCallback = activityCallback; + return m_fullActivityCallback.get(); } -GCActivityCallback* Heap::activityCallback() +GCActivityCallback* Heap::edenActivityCallback() { - return m_activityCallback.get(); + return m_edenActivityCallback.get(); } -#if PLATFORM(IOS) -void Heap::setIncrementalSweeper(PassOwnPtr sweeper) +void Heap::setIncrementalSweeper(std::unique_ptr sweeper) { - m_sweeper = sweeper; + m_sweeper = WTF::move(sweeper); } -#endif // PLATFORM(IOS) IncrementalSweeper* Heap::sweeper() { @@ -848,23 +1367,17 @@ IncrementalSweeper* Heap::sweeper() void Heap::setGarbageCollectionTimerEnabled(bool enable) { -#if PLATFORM(IOS) - if (m_activityCallback) - m_activityCallback->setEnabled(enable); -#else - activityCallback()->setEnabled(enable); -#endif // PLATFORM(IOS) + if (m_fullActivityCallback) + m_fullActivityCallback->setEnabled(enable); + if (m_edenActivityCallback) + m_edenActivityCallback->setEnabled(enable); } void Heap::didAllocate(size_t bytes) { -#if PLATFORM(IOS) - if (m_activityCallback) - m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned); -#else - m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned); -#endif // PLATFORM(IOS) - m_bytesAllocated += bytes; + if (m_edenActivityCallback) + m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect); + m_bytesAllocatedThisCycle += bytes; } 
bool Heap::isValidAllocation(size_t) @@ -896,9 +1409,26 @@ void Heap::addCompiledCode(ExecutableBase* executable) m_compiledCode.append(executable); } +void Heap::collectAllGarbageIfNotDoneRecently() +{ + if (!m_fullActivityCallback) { + collectAllGarbage(); + return; + } + + if (m_fullActivityCallback->didSyncGCRecently()) { + // A synchronous GC was already requested recently so we merely accelerate next collection. + reportAbandonedObjectGraph(); + return; + } + + m_fullActivityCallback->setDidSyncGCRecently(); + collectAllGarbage(); +} + class Zombify : public MarkedBlock::VoidFunctor { public: - void operator()(JSCell* cell) + inline void visit(JSCell* cell) { void** current = reinterpret_cast(cell); @@ -909,15 +1439,96 @@ public: void* limit = static_cast(reinterpret_cast(cell) + MarkedBlock::blockFor(cell)->cellSize()); for (; current < limit; current++) - *current = reinterpret_cast(0xbbadbeef); + *current = zombifiedBits; + } + IterationStatus operator()(JSCell* cell) + { + visit(cell); + return IterationStatus::Continue; } }; void Heap::zombifyDeadObjects() { // Sweep now because destructors will crash once we're zombified. - m_objectSpace.sweep(); - m_objectSpace.forEachDeadCell(); + { + SamplingRegion samplingRegion("Garbage Collection: Sweeping"); + m_objectSpace.zombifySweep(); + } + HeapIterationScope iterationScope(*this); + m_objectSpace.forEachDeadCell(iterationScope); +} + +void Heap::flushWriteBarrierBuffer(JSCell* cell) +{ +#if ENABLE(GGC) + m_writeBarrierBuffer.flush(*this); + m_writeBarrierBuffer.add(cell); +#else + UNUSED_PARAM(cell); +#endif +} + +bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const +{ +#if ENABLE(GGC) + if (Options::alwaysDoFullCollection()) + return true; + + switch (requestedCollectionType) { + case EdenCollection: + return false; + case FullCollection: + return true; + case AnyCollection: + return m_shouldDoFullCollection; + default: + RELEASE_ASSERT_NOT_REACHED(); + return false; + } + RELEASE_ASSERT_NOT_REACHED(); + return false; +#else + UNUSED_PARAM(requestedCollectionType); + return true; +#endif +} + +void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block) +{ + m_logicallyEmptyWeakBlocks.append(block); +} + +void Heap::sweepAllLogicallyEmptyWeakBlocks() +{ + if (m_logicallyEmptyWeakBlocks.isEmpty()) + return; + + m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0; + while (sweepNextLogicallyEmptyWeakBlock()) { } +} + +bool Heap::sweepNextLogicallyEmptyWeakBlock() +{ + if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound) + return false; + + WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep]; + + block->sweep(); + if (block->isEmpty()) { + std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last()); + m_logicallyEmptyWeakBlocks.removeLast(); + WeakBlock::destroy(block); + } else + m_indexOfNextLogicallyEmptyWeakBlockToSweep++; + + if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) { + m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound; + return false; + } + + return true; } } // namespace JSC
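Note on the diff above: the reworked GC_LOGGING machinery (GCTimer / GCTimerScope and the GCPHASE macro) times each collection phase with an RAII scope object and accumulates separate min/avg/max statistics for Eden and Full collections. The following standalone sketch is my own simplified illustration of that pattern, not code from this patch; the names PhaseTimer and PhaseScope are invented for the example, and it omits the Eden/Full split and the parent-timer chaining that the real GCTimer adds.

    // Simplified sketch of the RAII phase-timing pattern behind GCPHASE(...).
    // PhaseTimer / PhaseScope are hypothetical names used only for this example.
    #include <algorithm>
    #include <chrono>
    #include <cstddef>
    #include <cstdio>
    #include <limits>

    struct PhaseTimer {
        const char* name;
        double total = 0;
        double min = std::numeric_limits<double>::infinity();
        double max = 0;
        std::size_t count = 0;

        explicit PhaseTimer(const char* name) : name(name) { }

        void record(double seconds)
        {
            total += seconds;
            min = std::min(min, seconds);
            max = std::max(max, seconds);
            count++;
        }

        ~PhaseTimer()
        {
            // Dump aggregate statistics when the static timer is destroyed,
            // analogous to GCTimer::logData() in the diff.
            if (count) {
                std::printf("%s: %.2fms (avg %.2f, min %.2f, max %.2f, count %zu)\n",
                    name, total * 1000, total * 1000 / count, min * 1000, max * 1000, count);
            }
        }
    };

    struct PhaseScope {
        PhaseTimer& timer;
        std::chrono::steady_clock::time_point start { std::chrono::steady_clock::now() };

        explicit PhaseScope(PhaseTimer& timer) : timer(timer) { }

        ~PhaseScope()
        {
            // The destructor runs when the phase's scope exits, so the elapsed
            // time covers exactly the block the macro was placed in.
            std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
            timer.record(elapsed.count());
        }
    };

    // Usage, analogous to GCPHASE(MarkRoots) in Heap::markRoots():
    //     static PhaseTimer markRootsTimer("MarkRoots");
    //     { PhaseScope scope(markRootsTimer); /* ... do the phase's work ... */ }

The real GCTimer additionally records which timer was active when a phase started (the parent pointer), so the per-process log can show phase nesting, and GCTimerScope passes m_operationInProgress so each sample is filed under the Eden or Full bucket as well as the combined one.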