X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/40a37d088818fc2fbeba2ef850dbcaaf294befbf..ed1e77d3adeb83d26fd1dfb16dd84cabdcefd250:/heap/Heap.cpp

diff --git a/heap/Heap.cpp b/heap/Heap.cpp
index 447383b..b1f6625 100644
--- a/heap/Heap.cpp
+++ b/heap/Heap.cpp
@@ -27,7 +27,6 @@
 #include "CopiedSpaceInlines.h"
 #include "CopyVisitorInlines.h"
 #include "DFGWorklist.h"
-#include "DelayedReleaseScope.h"
 #include "EdenGCActivityCallback.h"
 #include "FullGCActivityCallback.h"
 #include "GCActivityCallback.h"
@@ -35,6 +34,7 @@
 #include "HeapIterationScope.h"
 #include "HeapRootVisitor.h"
 #include "HeapStatistics.h"
+#include "HeapVerifier.h"
 #include "IncrementalSweeper.h"
 #include "Interpreter.h"
 #include "JSGlobalObject.h"
@@ -44,6 +44,7 @@
 #include "JSVirtualMachineInternal.h"
 #include "RecursiveAllocationScope.h"
 #include "Tracing.h"
+#include "TypeProfilerLog.h"
 #include "UnlinkedCodeBlock.h"
 #include "VM.h"
 #include "WeakSetInlines.h"
@@ -79,115 +80,125 @@ static type name arguments;
 struct GCTimer {
     GCTimer(const char* name)
-        : m_name(name)
+        : name(name)
     {
     }
     ~GCTimer()
     {
-        logData(m_allCollectionData, "(All)");
-        logData(m_edenCollectionData, "(Eden)");
-        logData(m_fullCollectionData, "(Full)");
+        logData(allCollectionData, "(All)");
+        logData(edenCollectionData, "(Eden)");
+        logData(fullCollectionData, "(Full)");
     }
 
     struct TimeRecord {
         TimeRecord()
-            : m_time(0)
-            , m_min(std::numeric_limits<double>::infinity())
-            , m_max(0)
-            , m_count(0)
+            : time(0)
+            , min(std::numeric_limits<double>::infinity())
+            , max(0)
+            , count(0)
         {
         }
 
-        double m_time;
-        double m_min;
-        double m_max;
-        size_t m_count;
+        double time;
+        double min;
+        double max;
+        size_t count;
     };
 
     void logData(const TimeRecord& data, const char* extra)
     {
-        dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
+        dataLogF("[%d] %s (Parent: %s) %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
             getCurrentProcessID(),
-            m_name, extra,
-            data.m_time * 1000,
-            data.m_time * 1000 / data.m_count,
-            data.m_min * 1000,
-            data.m_max * 1000,
-            data.m_count);
+            name,
+            parent ? parent->name : "nullptr",
+            extra,
+            data.time * 1000,
+            data.time * 1000 / data.count,
+            data.min * 1000,
+            data.max * 1000,
+            data.count);
     }
 
     void updateData(TimeRecord& data, double duration)
     {
-        if (duration < data.m_min)
-            data.m_min = duration;
-        if (duration > data.m_max)
-            data.m_max = duration;
-        data.m_count++;
-        data.m_time += duration;
+        if (duration < data.min)
+            data.min = duration;
+        if (duration > data.max)
+            data.max = duration;
+        data.count++;
+        data.time += duration;
    }
 
     void didFinishPhase(HeapOperation collectionType, double duration)
     {
-        TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
+        TimeRecord& data = collectionType == EdenCollection ? edenCollectionData : fullCollectionData;
         updateData(data, duration);
-        updateData(m_allCollectionData, duration);
+        updateData(allCollectionData, duration);
     }
 
-    TimeRecord m_allCollectionData;
-    TimeRecord m_fullCollectionData;
-    TimeRecord m_edenCollectionData;
-    const char* m_name;
+    static GCTimer* s_currentGlobalTimer;
+
+    TimeRecord allCollectionData;
+    TimeRecord fullCollectionData;
+    TimeRecord edenCollectionData;
+    const char* name;
+    GCTimer* parent { nullptr };
 };
 
+GCTimer* GCTimer::s_currentGlobalTimer = nullptr;
+
 struct GCTimerScope {
-    GCTimerScope(GCTimer* timer, HeapOperation collectionType)
-        : m_timer(timer)
-        , m_start(WTF::monotonicallyIncreasingTime())
-        , m_collectionType(collectionType)
+    GCTimerScope(GCTimer& timer, HeapOperation collectionType)
+        : timer(timer)
+        , start(WTF::monotonicallyIncreasingTime())
+        , collectionType(collectionType)
     {
+        timer.parent = GCTimer::s_currentGlobalTimer;
+        GCTimer::s_currentGlobalTimer = &timer;
     }
     ~GCTimerScope()
     {
-        double delta = WTF::monotonicallyIncreasingTime() - m_start;
-        m_timer->didFinishPhase(m_collectionType, delta);
+        double delta = WTF::monotonicallyIncreasingTime() - start;
+        timer.didFinishPhase(collectionType, delta);
+        GCTimer::s_currentGlobalTimer = timer.parent;
     }
-    GCTimer* m_timer;
-    double m_start;
-    HeapOperation m_collectionType;
+    GCTimer& timer;
+    double start;
+    HeapOperation collectionType;
 };
 
 struct GCCounter {
     GCCounter(const char* name)
-        : m_name(name)
-        , m_count(0)
-        , m_total(0)
-        , m_min(10000000)
-        , m_max(0)
+        : name(name)
+        , count(0)
+        , total(0)
+        , min(10000000)
+        , max(0)
     {
     }
 
-    void count(size_t amount)
+    void add(size_t amount)
     {
-        m_count++;
-        m_total += amount;
-        if (amount < m_min)
-            m_min = amount;
-        if (amount > m_max)
-            m_max = amount;
+        count++;
+        total += amount;
+        if (amount < min)
+            min = amount;
+        if (amount > max)
+            max = amount;
     }
     ~GCCounter()
     {
-        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
+        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), name, total, total / count, min, max);
     }
-    const char* m_name;
-    size_t m_count;
-    size_t m_total;
-    size_t m_min;
-    size_t m_max;
+    const char* name;
+    size_t count;
+    size_t total;
+    size_t min;
+    size_t max;
 };
 
-#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
-#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
+#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(name##Timer, m_operationInProgress)
+#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.add(value); } while (false)
 
 #else
@@ -229,12 +240,17 @@ static inline bool isValidThreadState(VM* vm)
 }
 
 struct MarkObject : public MarkedBlock::VoidFunctor {
-    void operator()(JSCell* cell)
+    inline void visit(JSCell* cell)
     {
         if (cell->isZapped())
             return;
         Heap::heap(cell)->setMarked(cell);
     }
+    IterationStatus operator()(JSCell* cell)
+    {
+        visit(cell);
+        return IterationStatus::Continue;
+    }
 };
 
 struct Count : public MarkedBlock::CountFunctor {
@@ -242,30 +258,36 @@ struct Count : public MarkedBlock::CountFunctor {
 };
 
 struct CountIfGlobalObject : MarkedBlock::CountFunctor {
-    void operator()(JSCell* cell) {
+    inline void visit(JSCell* cell)
+    {
         if (!cell->isObject())
             return;
         if (!asObject(cell)->isGlobalObject())
             return;
         count(1);
     }
+    IterationStatus operator()(JSCell* cell)
+    {
+        visit(cell);
+        return IterationStatus::Continue;
+    }
 };
 
 class RecordType {
 public:
-    typedef PassOwnPtr<TypeCountSet> ReturnType;
+    typedef std::unique_ptr<TypeCountSet> ReturnType;
 
     RecordType();
-    void operator()(JSCell*);
+    IterationStatus operator()(JSCell*);
     ReturnType returnValue();
 
 private:
     const char* typeName(JSCell*);
-    OwnPtr<TypeCountSet> m_typeCountSet;
+    std::unique_ptr<TypeCountSet> m_typeCountSet;
 };
 
 inline RecordType::RecordType()
-    : m_typeCountSet(adoptPtr(new TypeCountSet))
+    : m_typeCountSet(std::make_unique<TypeCountSet>())
 {
 }
@@ -277,21 +299,22 @@ inline const char* RecordType::typeName(JSCell* cell)
     return info->className;
 }
 
-inline void RecordType::operator()(JSCell* cell)
+inline IterationStatus RecordType::operator()(JSCell* cell)
 {
     m_typeCountSet->add(typeName(cell));
+    return IterationStatus::Continue;
 }
 
-inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
+inline std::unique_ptr<TypeCountSet> RecordType::returnValue()
 {
-    return m_typeCountSet.release();
+    return WTF::move(m_typeCountSet);
 }
 
 } // anonymous namespace
 
 Heap::Heap(VM* vm, HeapType heapType)
     : m_heapType(heapType)
-    , m_ramSize(ramSize())
+    , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
     , m_sizeAfterLastCollect(0)
     , m_sizeAfterLastFullCollect(0)
@@ -306,16 +329,15 @@ Heap::Heap(VM* vm, HeapType heapType)
     , m_totalBytesVisited(0)
     , m_totalBytesCopied(0)
     , m_operationInProgress(NoOperation)
-    , m_blockAllocator()
     , m_objectSpace(this)
     , m_storageSpace(this)
-    , m_extraMemoryUsage(0)
+    , m_extraMemorySize(0)
+    , m_deprecatedExtraMemorySize(0)
     , m_machineThreads(this)
     , m_sharedData(vm)
     , m_slotVisitor(m_sharedData)
     , m_copyVisitor(m_sharedData)
     , m_handleSet(vm)
-    , m_codeBlocks(m_blockAllocator)
     , m_isSafeToCollect(false)
     , m_writeBarrierBuffer(256)
     , m_vm(vm)
@@ -330,14 +352,25 @@ Heap::Heap(VM* vm, HeapType heapType)
 #else
     , m_edenActivityCallback(m_fullActivityCallback)
 #endif
-    , m_sweeper(IncrementalSweeper::create(this))
+#if USE(CF)
+    , m_sweeper(std::make_unique<IncrementalSweeper>(this, CFRunLoopGetCurrent()))
+#else
+    , m_sweeper(std::make_unique<IncrementalSweeper>(this->vm()))
+#endif
     , m_deferralDepth(0)
+#if USE(CF)
+    , m_delayedReleaseRecursionCount(0)
+#endif
 {
     m_storageSpace.init();
+    if (Options::verifyHeap())
+        m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
 }
 
 Heap::~Heap()
 {
+    for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
+        WeakBlock::destroy(block);
 }
 
 bool Heap::isPagedOut(double deadline)
@@ -353,25 +386,51 @@ void Heap::lastChanceToFinalize()
     RELEASE_ASSERT(m_operationInProgress == NoOperation);
     m_objectSpace.lastChanceToFinalize();
+    releaseDelayedReleasedObjects();
+
+    sweepAllLogicallyEmptyWeakBlocks();
 }
 
-void Heap::reportExtraMemoryCostSlowCase(size_t cost)
+void Heap::releaseDelayedReleasedObjects()
 {
-    // Our frequency of garbage collection tries to balance memory use against speed
-    // by collecting based on the number of newly created values. However, for values
-    // that hold on to a great deal of memory that's not in the form of other JS values,
-    // that is not good enough - in some cases a lot of those objects can pile up and
-    // use crazy amounts of memory without a GC happening. So we track these extra
-    // memory costs. Only unusually large objects are noted, and we only keep track
-    // of this extra cost until the next GC. In garbage collected languages, most values
-    // are either very short lived temporaries, or have extremely long lifetimes. So
-    // if a large value survives one garbage collection, there is not much point to
-    // collecting more frequently as long as it stays alive.
+#if USE(CF)
+    // We need to guard against the case that releasing an object can create more objects due to the
+    // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
+    // back here and could try to recursively release objects. We guard that with a recursive entry
+    // count. Only the initial call will release objects; recursive calls simply return and let the
+    // initial call to the function take care of any objects created during release time.
+    // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
+    // and use a temp Vector for the actual releasing.
+    if (!m_delayedReleaseRecursionCount++) {
+        while (!m_delayedReleaseObjects.isEmpty()) {
+            ASSERT(m_vm->currentThreadIsHoldingAPILock());
+
+            Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTF::move(m_delayedReleaseObjects);
+
+            {
+                // We need to drop locks before calling out to arbitrary code.
+                JSLock::DropAllLocks dropAllLocks(m_vm);
+
+                objectsToRelease.clear();
+            }
+        }
+    }
+    m_delayedReleaseRecursionCount--;
+#endif
+}
 
-    didAllocate(cost);
+void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
+{
+    didAllocate(size);
     collectIfNecessaryOrDefer();
 }
 
+void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
+{
+    m_deprecatedExtraMemorySize += size;
+    reportExtraMemoryAllocatedSlowCase(size);
+}
+
 void Heap::reportAbandonedObjectGraph()
 {
     // Our clients don't know exactly how much memory they
@@ -424,17 +483,6 @@ void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
     }
 }
 
-void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
-{
-    m_tempSortingVectors.append(tempVector);
-}
-
-void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
-{
-    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
-    m_tempSortingVectors.removeLast();
-}
-
 void Heap::harvestWeakReferences()
 {
     m_slotVisitor.harvestWeakReferences();
@@ -475,7 +523,7 @@ void Heap::getConservativeRegisterRoots(HashSet<void*>& roots)
     }
 }
 
-void Heap::markRoots(double gcStartTime)
+void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
 {
     SamplingRegion samplingRegion("Garbage Collection: Marking");
 
@@ -496,15 +544,11 @@ void Heap::markRoots(double gcStartTime)
     // We gather conservative roots before clearing mark bits because conservative
     // gathering uses the mark bits to determine whether a reference is valid.
-    void* dummy;
-    ALLOCATE_AND_GET_REGISTER_STATE(registers);
     ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
-    gatherStackRoots(conservativeRoots, &dummy, registers);
+    gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
     gatherJSStackRoots(conservativeRoots);
     gatherScratchBufferRoots(conservativeRoots);
-    sanitizeStackForVM(m_vm);
-
     clearLivenessData();
 
     m_sharedData.didStartMarking();
@@ -518,7 +562,6 @@ void Heap::markRoots(double gcStartTime)
     visitSmallStrings();
     visitConservativeRoots(conservativeRoots);
     visitProtectedObjects(heapRootVisitor);
-    visitTempSortVectors(heapRootVisitor);
     visitArgumentBuffers(heapRootVisitor);
     visitException(heapRootVisitor);
     visitStrongHandles(heapRootVisitor);
@@ -539,6 +582,7 @@ void Heap::markRoots(double gcStartTime)
 
 void Heap::copyBackingStores()
 {
+    GCPHASE(CopyBackingStores);
     if (m_operationInProgress == EdenCollection)
         m_storageSpace.startedCopying();
     else {
@@ -559,11 +603,11 @@ void Heap::copyBackingStores()
     m_storageSpace.doneCopying();
 }
 
-void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy, MachineThreads::RegisterState& registers)
+void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
 {
     GCPHASE(GatherStackRoots);
     m_jitStubRoutines.clearMarks();
-    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy, registers);
+    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
 }
 
 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
@@ -603,11 +647,12 @@ void Heap::visitExternalRememberedSet()
 void Heap::visitSmallStrings()
 {
     GCPHASE(VisitSmallStrings);
-    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
+    if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
+        return;
+    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
     if (Options::logGC() == GCLogging::Verbose)
         dataLog("Small strings:\n", m_slotVisitor);
-
     m_slotVisitor.donateAndDrain();
 }
 
@@ -655,24 +700,6 @@ void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
     m_slotVisitor.donateAndDrain();
 }
 
-void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
-{
-    GCPHASE(VisitTempSortVectors);
-    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>*> VectorOfValueStringVectors;
-
-    for (auto* vector : m_tempSortingVectors) {
-        for (auto& valueStringPair : *vector) {
-            if (valueStringPair.first)
-                heapRootVisitor.visit(&valueStringPair.first);
-        }
-    }
-
-    if (Options::logGC() == GCLogging::Verbose)
-        dataLog("Temp Sort Vectors:\n", m_slotVisitor);
-
-    m_slotVisitor.donateAndDrain();
-}
-
 void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
 {
     GCPHASE(MarkingArgumentBuffers);
@@ -690,10 +717,11 @@ void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
 void Heap::visitException(HeapRootVisitor& visitor)
 {
     GCPHASE(MarkingException);
-    if (!m_vm->exception())
+    if (!m_vm->exception() && !m_vm->lastException())
         return;
 
     visitor.visit(m_vm->addressOfException());
+    visitor.visit(m_vm->addressOfLastException());
 
     if (Options::logGC() == GCLogging::Verbose)
         dataLog("Exceptions:\n", m_slotVisitor);
@@ -771,10 +799,8 @@ void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
 {
 #if ENABLE(GGC)
     GCPHASE(ClearRememberedSet);
-    for (auto* cell : rememberedSet) {
-        MarkedBlock::blockFor(cell)->clearRemembered(cell);
+    for (auto* cell : rememberedSet)
         const_cast<JSCell*>(cell)->setRemembered(false);
-    }
 #else
     UNUSED_PARAM(rememberedSet);
 #endif
@@ -791,15 +817,18 @@ void Heap::updateObjectCounts(double gcStartTime)
 #endif
         dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
     }
-
-    if (m_operationInProgress == EdenCollection) {
-        m_totalBytesVisited += m_slotVisitor.bytesVisited();
-        m_totalBytesCopied += m_slotVisitor.bytesCopied();
-    } else {
-        ASSERT(m_operationInProgress == FullCollection);
-        m_totalBytesVisited = m_slotVisitor.bytesVisited();
-        m_totalBytesCopied = m_slotVisitor.bytesCopied();
-    }
+
+    size_t bytesRemovedFromOldSpaceDueToReallocation =
+        m_storageSpace.takeBytesRemovedFromOldSpaceDueToReallocation();
+
+    if (m_operationInProgress == FullCollection) {
+        m_totalBytesVisited = 0;
+        m_totalBytesCopied = 0;
+    } else
+        m_totalBytesCopied -= bytesRemovedFromOldSpaceDueToReallocation;
+
+    m_totalBytesVisited += m_slotVisitor.bytesVisited();
+    m_totalBytesCopied += m_slotVisitor.bytesCopied();
 #if ENABLE(PARALLEL_GC)
     m_totalBytesVisited += m_sharedData.childBytesVisited();
     m_totalBytesCopied += m_sharedData.childBytesCopied();
@@ -820,19 +849,19 @@ size_t Heap::objectCount()
     return m_objectSpace.objectCount();
 }
 
-size_t Heap::extraSize()
+size_t Heap::extraMemorySize()
 {
-    return m_extraMemoryUsage + m_arrayBuffers.size();
+    return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
 }
 
 size_t Heap::size()
 {
-    return m_objectSpace.size() + m_storageSpace.size() + extraSize();
+    return m_objectSpace.size() + m_storageSpace.size() + extraMemorySize();
 }
 
 size_t Heap::capacity()
 {
-    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
+    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraMemorySize();
 }
 
 size_t Heap::sizeAfterCollect()
@@ -842,7 +871,7 @@ size_t Heap::sizeAfterCollect()
     // rather than all used (including dead) copied bytes, thus it's
     // always the case that m_totalBytesCopied <= m_storageSpace.size().
     ASSERT(m_totalBytesCopied <= m_storageSpace.size());
-    return m_totalBytesVisited + m_totalBytesCopied + extraSize();
+    return m_totalBytesVisited + m_totalBytesCopied + extraMemorySize();
 }
 
 size_t Heap::protectedGlobalObjectCount()
@@ -861,12 +890,12 @@ size_t Heap::protectedObjectCount()
     return forEachProtectedCell<Count>();
 }
 
-PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
 {
     return forEachProtectedCell<RecordType>();
 }
 
-PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
 {
     HeapIterationScope iterationScope(*this);
     return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
@@ -895,10 +924,10 @@ void Heap::deleteAllCompiledCode()
     }
 #endif // ENABLE(DFG_JIT)
 
-    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+    for (ExecutableBase* current : m_compiledCode) {
         if (!current->isFunctionExecutable())
             continue;
-        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
+        static_cast<FunctionExecutable*>(current)->clearCode();
     }
 
     ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
@@ -908,26 +937,26 @@
 void Heap::deleteAllUnlinkedFunctionCode()
 {
-    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+    for (ExecutableBase* current : m_compiledCode) {
         if (!current->isFunctionExecutable())
             continue;
-        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
+        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilation();
     }
 }
 
 void Heap::clearUnmarkedExecutables()
 {
     GCPHASE(ClearUnmarkedExecutables);
-    ExecutableBase* next;
-    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
-        next = current->next();
+    for (unsigned i = m_compiledCode.size(); i--;) {
+        ExecutableBase* current = m_compiledCode[i];
         if (isMarked(current))
             continue;
 
         // We do this because executable memory is limited on some platforms and because
         // CodeBlock requires eager finalization.
         ExecutableBase::clearCodeVirtual(current);
-        m_compiledCode.remove(current);
+        std::swap(m_compiledCode[i], m_compiledCode.last());
+        m_compiledCode.removeLast();
     }
 }
 
@@ -945,27 +974,39 @@ void Heap::addToRememberedSet(const JSCell* cell)
     ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
     if (isRemembered(cell))
         return;
-    MarkedBlock::blockFor(cell)->setRemembered(cell);
     const_cast<JSCell*>(cell)->setRemembered(true);
     m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
 }
 
-void Heap::collectAllGarbage()
+void Heap::collectAndSweep(HeapOperation collectionType)
 {
     if (!m_isSafeToCollect)
         return;
 
-    collect(FullCollection);
+    collect(collectionType);
 
     SamplingRegion samplingRegion("Garbage Collection: Sweeping");
-    DelayedReleaseScope delayedReleaseScope(m_objectSpace);
+
+    DeferGCForAWhile deferGC(*this);
     m_objectSpace.sweep();
     m_objectSpace.shrink();
+
+    sweepAllLogicallyEmptyWeakBlocks();
 }
 
 static double minute = 60.0;
 
-void Heap::collect(HeapOperation collectionType)
+NEVER_INLINE void Heap::collect(HeapOperation collectionType)
+{
+    void* stackTop;
+    ALLOCATE_AND_GET_REGISTER_STATE(registers);
+
+    collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers);
+
+    sanitizeStackForVM(m_vm);
+}
+
+NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
 {
 #if ENABLE(ALLOCATION_LOGGING)
     dataLogF("JSC GC starting collection.\n");
@@ -979,6 +1020,11 @@ void Heap::collect(HeapOperation collectionType)
 
     SamplingRegion samplingRegion("Garbage Collection");
 
+    if (vm()->typeProfiler()) {
+        DeferGCForAWhile awhile(*this);
+        vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
+    }
+
     RELEASE_ASSERT(!m_deferralDepth);
     ASSERT(vm()->currentThreadIsHoldingAPILock());
     RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
@@ -991,17 +1037,33 @@ void Heap::collect(HeapOperation collectionType)
     GCPHASE(Collect);
 
     double gcStartTime = WTF::monotonicallyIncreasingTime();
+    if (m_verifier) {
+        // Verify that live objects from the last GC cycle haven't been corrupted by
+        // mutators before we begin this new GC cycle.
+        m_verifier->verify(HeapVerifier::Phase::BeforeGC);
+
+        m_verifier->initializeGCCycle();
+        m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
+    }
 
     deleteOldCode(gcStartTime);
     flushOldStructureIDTables();
     stopAllocation();
     flushWriteBarrierBuffer();
 
-    markRoots(gcStartTime);
+    markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);
 
+    if (m_verifier) {
+        m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
+        m_verifier->verify(HeapVerifier::Phase::AfterMarking);
+    }
     JAVASCRIPTCORE_GC_MARKED();
 
+    if (vm()->typeProfiler())
+        vm()->typeProfiler()->invalidateTypeSetCache();
+
     reapWeakHandles();
+    pruneStaleEntriesFromWeakGCMaps();
     sweepArrayBuffers();
     snapshotMarkedSpace();
@@ -1019,6 +1081,11 @@ void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
     didFinishCollection(gcStartTime);
     resumeCompilerThreads();
 
+    if (m_verifier) {
+        m_verifier->trimDeadObjects();
+        m_verifier->verify(HeapVerifier::Phase::AfterGC);
+    }
+
     if (Options::logGC()) {
         double after = currentTimeMS();
         dataLog(after - before, " ms]\n");
@@ -1055,7 +1122,8 @@ void Heap::willStartCollection(HeapOperation collectionType)
     }
     if (m_operationInProgress == FullCollection) {
         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
-        m_extraMemoryUsage = 0;
+        m_extraMemorySize = 0;
+        m_deprecatedExtraMemorySize = 0;
 
         if (m_fullActivityCallback)
             m_fullActivityCallback->willCollect();
@@ -1110,6 +1178,15 @@ void Heap::reapWeakHandles()
     m_objectSpace.reapWeakSets();
 }
 
+void Heap::pruneStaleEntriesFromWeakGCMaps()
+{
+    GCPHASE(PruningStaleEntriesFromWeakGCMaps);
+    if (m_operationInProgress != FullCollection)
+        return;
+    for (auto& pruneCallback : m_weakGCMaps.values())
+        pruneCallback();
+}
+
 void Heap::sweepArrayBuffers()
 {
     GCPHASE(SweepingArrayBuffers);
@@ -1132,12 +1209,17 @@ struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
 void Heap::snapshotMarkedSpace()
 {
     GCPHASE(SnapshotMarkedSpace);
-    if (m_operationInProgress != FullCollection)
-        return;
 
-    m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
-    MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
-    m_objectSpace.forEachBlock(functor);
+    if (m_operationInProgress == EdenCollection) {
+        m_blockSnapshot.appendVector(m_objectSpace.blocksWithNewObjects());
+        // Sort and deduplicate the block snapshot since we might be appending to an unfinished work list.
+        std::sort(m_blockSnapshot.begin(), m_blockSnapshot.end());
+        m_blockSnapshot.shrink(std::unique(m_blockSnapshot.begin(), m_blockSnapshot.end()) - m_blockSnapshot.begin());
+    } else {
+        m_blockSnapshot.resizeToFit(m_objectSpace.blocks().set().size());
+        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
+        m_objectSpace.forEachBlock(functor);
+    }
 }
 
 void Heap::deleteSourceProviderCaches()
@@ -1149,9 +1231,13 @@ void Heap::deleteSourceProviderCaches()
 void Heap::notifyIncrementalSweeper()
 {
     GCPHASE(NotifyIncrementalSweeper);
-    if (m_operationInProgress != FullCollection)
-        return;
-    m_sweeper->startSweeping(m_blockSnapshot);
+
+    if (m_operationInProgress == FullCollection) {
+        if (!m_logicallyEmptyWeakBlocks.isEmpty())
+            m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+    }
+
+    m_sweeper->startSweeping();
 }
 
 void Heap::rememberCurrentlyExecutingCodeBlocks()
@@ -1215,10 +1301,6 @@ void Heap::didFinishCollection(double gcStartTime)
     if (Options::recordGCPauseTimes())
         HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
 
-    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
-
-    m_operationInProgress = NoOperation;
-    JAVASCRIPTCORE_GC_END();
 
     if (Options::useZombieMode())
         zombifyDeadObjects();
@@ -1231,6 +1313,10 @@ void Heap::didFinishCollection(double gcStartTime)
 
     if (Options::logGC() == GCLogging::Verbose)
         GCLogging::dumpObjectGraph(this);
+
+    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
+    m_operationInProgress = NoOperation;
+    JAVASCRIPTCORE_GC_END();
 }
 
 void Heap::resumeCompilerThreads()
@@ -1269,9 +1355,9 @@ GCActivityCallback* Heap::edenActivityCallback()
     return m_edenActivityCallback.get();
 }
 
-void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
+void Heap::setIncrementalSweeper(std::unique_ptr<IncrementalSweeper> sweeper)
 {
-    m_sweeper = sweeper;
+    m_sweeper = WTF::move(sweeper);
 }
 
 IncrementalSweeper* Heap::sweeper()
@@ -1323,9 +1409,26 @@ void Heap::addCompiledCode(ExecutableBase* executable)
     m_compiledCode.append(executable);
 }
 
+void Heap::collectAllGarbageIfNotDoneRecently()
+{
+    if (!m_fullActivityCallback) {
+        collectAllGarbage();
+        return;
+    }
+
+    if (m_fullActivityCallback->didSyncGCRecently()) {
+        // A synchronous GC was already requested recently so we merely accelerate next collection.
+        reportAbandonedObjectGraph();
+        return;
+    }
+
+    m_fullActivityCallback->setDidSyncGCRecently();
+    collectAllGarbage();
+}
+
 class Zombify : public MarkedBlock::VoidFunctor {
 public:
-    void operator()(JSCell* cell)
+    inline void visit(JSCell* cell)
     {
         void** current = reinterpret_cast<void**>(cell);
@@ -1338,6 +1441,11 @@ public:
         for (; current < limit; current++)
             *current = zombifiedBits;
     }
+    IterationStatus operator()(JSCell* cell)
+    {
+        visit(cell);
+        return IterationStatus::Continue;
+    }
 };
 
 void Heap::zombifyDeadObjects()
@@ -1345,7 +1453,6 @@ void Heap::zombifyDeadObjects()
     // Sweep now because destructors will crash once we're zombified.
     {
         SamplingRegion samplingRegion("Garbage Collection: Sweeping");
-        DelayedReleaseScope delayedReleaseScope(m_objectSpace);
         m_objectSpace.zombifySweep();
     }
     HeapIterationScope iterationScope(*this);
@@ -1387,4 +1494,41 @@ bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
 #endif
 }
 
+void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
+{
+    m_logicallyEmptyWeakBlocks.append(block);
+}
+
+void Heap::sweepAllLogicallyEmptyWeakBlocks()
+{
+    if (m_logicallyEmptyWeakBlocks.isEmpty())
+        return;
+
+    m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+    while (sweepNextLogicallyEmptyWeakBlock()) { }
+}
+
+bool Heap::sweepNextLogicallyEmptyWeakBlock()
+{
+    if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
+        return false;
+
+    WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
+
+    block->sweep();
+    if (block->isEmpty()) {
+        std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
+        m_logicallyEmptyWeakBlocks.removeLast();
+        WeakBlock::destroy(block);
+    } else
+        m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
+
+    if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
+        m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
+        return false;
+    }
+
+    return true;
+}
+
 } // namespace JSC
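
Two techniques in this patch are worth a closer look.

First, GCTimerScope now threads a parent pointer through GCTimer::s_currentGlobalTimer, turning the flat per-phase timers into a stack of nested scopes so each phase's log line can name the phase it ran under. Below is a minimal standalone sketch of that pattern; it substitutes std::chrono and printf for WTF::monotonicallyIncreasingTime() and dataLogF(), so it illustrates the technique rather than reproducing the JavaScriptCore code.

// Sketch of the scoped-timer parent chaining used by GCTimer/GCTimerScope above.
// Assumptions: std::chrono in place of WTF's clock, printf in place of dataLogF,
// single-threaded use (as in the GC phases). Not the JSC implementation.
#include <chrono>
#include <cstdio>

struct Timer {
    const char* name;
    Timer* parent { nullptr };
    static Timer* s_current; // innermost active timer, like GCTimer::s_currentGlobalTimer
};

Timer* Timer::s_current = nullptr;

struct TimerScope {
    explicit TimerScope(Timer& timer)
        : m_timer(timer)
        , m_start(std::chrono::steady_clock::now())
    {
        // Push: remember the enclosing timer, then become the current one.
        m_timer.parent = Timer::s_current;
        Timer::s_current = &m_timer;
    }
    ~TimerScope()
    {
        std::chrono::duration<double> delta = std::chrono::steady_clock::now() - m_start;
        std::printf("%s (parent: %s): %.3f ms\n",
            m_timer.name,
            m_timer.parent ? m_timer.parent->name : "none",
            delta.count() * 1000);
        // Pop: restore the enclosing timer.
        Timer::s_current = m_timer.parent;
    }

    Timer& m_timer;
    std::chrono::steady_clock::time_point m_start;
};

int main()
{
    Timer collect { "Collect" };
    Timer markRoots { "MarkRoots" };

    TimerScope collectScope(collect);
    {
        TimerScope markScope(markRoots); // logs "MarkRoots (parent: Collect)"
    }
    return 0; // collectScope logs "Collect (parent: none)" on exit
}

Second, both clearUnmarkedExecutables() and sweepNextLogicallyEmptyWeakBlock() switch to "swap with last, then removeLast()", which erases a vector element in O(1) when element order does not matter. A generic sketch, with std::vector standing in for WTF::Vector and unorderedRemoveAt as a hypothetical helper name:

#include <utility>
#include <vector>

template<typename T>
void unorderedRemoveAt(std::vector<T>& vector, size_t index)
{
    std::swap(vector[index], vector.back()); // O(1): no shifting of later elements
    vector.pop_back();                       // slot 'index' now holds the old last element
}

Note that clearUnmarkedExecutables() iterates backwards (for (unsigned i = m_compiledCode.size(); i--;)) so any element swapped into slot i comes from the already-visited tail, and nothing gets skipped.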