#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "DFGWorklist.h"
-#include "DelayedReleaseScope.h"
#include "EdenGCActivityCallback.h"
#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
#include "HeapIterationScope.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
+#include "HeapVerifier.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSVirtualMachineInternal.h"
#include "RecursiveAllocationScope.h"
#include "Tracing.h"
+#include "TypeProfilerLog.h"
#include "UnlinkedCodeBlock.h"
#include "VM.h"
#include "WeakSetInlines.h"
struct GCTimer {
GCTimer(const char* name)
- : m_name(name)
+ : name(name)
{
}
~GCTimer()
{
- logData(m_allCollectionData, "(All)");
- logData(m_edenCollectionData, "(Eden)");
- logData(m_fullCollectionData, "(Full)");
+ logData(allCollectionData, "(All)");
+ logData(edenCollectionData, "(Eden)");
+ logData(fullCollectionData, "(Full)");
}
struct TimeRecord {
TimeRecord()
- : m_time(0)
- , m_min(std::numeric_limits<double>::infinity())
- , m_max(0)
- , m_count(0)
+ : time(0)
+ , min(std::numeric_limits<double>::infinity())
+ , max(0)
+ , count(0)
{
}
- double m_time;
- double m_min;
- double m_max;
- size_t m_count;
+ double time;
+ double min;
+ double max;
+ size_t count;
};
void logData(const TimeRecord& data, const char* extra)
{
- dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
+ dataLogF("[%d] %s (Parent: %s) %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
getCurrentProcessID(),
- m_name, extra,
- data.m_time * 1000,
- data.m_time * 1000 / data.m_count,
- data.m_min * 1000,
- data.m_max * 1000,
- data.m_count);
+ name,
+ parent ? parent->name : "nullptr",
+ extra,
+ data.time * 1000,
+ data.time * 1000 / data.count,
+ data.min * 1000,
+ data.max * 1000,
+ data.count);
}
void updateData(TimeRecord& data, double duration)
{
- if (duration < data.m_min)
- data.m_min = duration;
- if (duration > data.m_max)
- data.m_max = duration;
- data.m_count++;
- data.m_time += duration;
+ if (duration < data.min)
+ data.min = duration;
+ if (duration > data.max)
+ data.max = duration;
+ data.count++;
+ data.time += duration;
}
void didFinishPhase(HeapOperation collectionType, double duration)
{
- TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
+ TimeRecord& data = collectionType == EdenCollection ? edenCollectionData : fullCollectionData;
updateData(data, duration);
- updateData(m_allCollectionData, duration);
+ updateData(allCollectionData, duration);
}
- TimeRecord m_allCollectionData;
- TimeRecord m_fullCollectionData;
- TimeRecord m_edenCollectionData;
- const char* m_name;
+ static GCTimer* s_currentGlobalTimer;
+
+ TimeRecord allCollectionData;
+ TimeRecord fullCollectionData;
+ TimeRecord edenCollectionData;
+ const char* name;
+ GCTimer* parent { nullptr };
};
+GCTimer* GCTimer::s_currentGlobalTimer = nullptr;
+
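+// GCTimerScope keeps a stack of active timers via GCTimer::s_currentGlobalTimer, so nested GC
+// phases record their parent phase and logData() can print it alongside each timer's stats.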
struct GCTimerScope {
- GCTimerScope(GCTimer* timer, HeapOperation collectionType)
- : m_timer(timer)
- , m_start(WTF::monotonicallyIncreasingTime())
- , m_collectionType(collectionType)
+ GCTimerScope(GCTimer& timer, HeapOperation collectionType)
+ : timer(timer)
+ , start(WTF::monotonicallyIncreasingTime())
+ , collectionType(collectionType)
{
+ timer.parent = GCTimer::s_currentGlobalTimer;
+ GCTimer::s_currentGlobalTimer = &timer;
}
~GCTimerScope()
{
- double delta = WTF::monotonicallyIncreasingTime() - m_start;
- m_timer->didFinishPhase(m_collectionType, delta);
+ double delta = WTF::monotonicallyIncreasingTime() - start;
+ timer.didFinishPhase(collectionType, delta);
+ GCTimer::s_currentGlobalTimer = timer.parent;
}
- GCTimer* m_timer;
- double m_start;
- HeapOperation m_collectionType;
+ GCTimer& timer;
+ double start;
+ HeapOperation collectionType;
};
struct GCCounter {
GCCounter(const char* name)
- : m_name(name)
- , m_count(0)
- , m_total(0)
- , m_min(10000000)
- , m_max(0)
+ : name(name)
+ , count(0)
+ , total(0)
+ , min(10000000)
+ , max(0)
{
}
- void count(size_t amount)
+ void add(size_t amount)
{
- m_count++;
- m_total += amount;
- if (amount < m_min)
- m_min = amount;
- if (amount > m_max)
- m_max = amount;
+ count++;
+ total += amount;
+ if (amount < min)
+ min = amount;
+ if (amount > max)
+ max = amount;
}
~GCCounter()
{
- dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
+ dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), name, total, total / count, min, max);
}
- const char* m_name;
- size_t m_count;
- size_t m_total;
- size_t m_min;
- size_t m_max;
+ const char* name;
+ size_t count;
+ size_t total;
+ size_t min;
+ size_t max;
};
-#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
-#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
+#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(name##Timer, m_operationInProgress)
+#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.add(value); } while (false)
#else
}
struct MarkObject : public MarkedBlock::VoidFunctor {
- void operator()(JSCell* cell)
+ inline void visit(JSCell* cell)
{
if (cell->isZapped())
return;
Heap::heap(cell)->setMarked(cell);
}
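+    // Heap iteration now expects an IterationStatus result; wrap visit() and always continue.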
+ IterationStatus operator()(JSCell* cell)
+ {
+ visit(cell);
+ return IterationStatus::Continue;
+ }
};
struct Count : public MarkedBlock::CountFunctor {
};
struct CountIfGlobalObject : MarkedBlock::CountFunctor {
- void operator()(JSCell* cell) {
+ inline void visit(JSCell* cell)
+ {
if (!cell->isObject())
return;
if (!asObject(cell)->isGlobalObject())
return;
count(1);
}
+ IterationStatus operator()(JSCell* cell)
+ {
+ visit(cell);
+ return IterationStatus::Continue;
+ }
};
class RecordType {
public:
- typedef PassOwnPtr<TypeCountSet> ReturnType;
+ typedef std::unique_ptr<TypeCountSet> ReturnType;
RecordType();
- void operator()(JSCell*);
+ IterationStatus operator()(JSCell*);
ReturnType returnValue();
private:
const char* typeName(JSCell*);
- OwnPtr<TypeCountSet> m_typeCountSet;
+ std::unique_ptr<TypeCountSet> m_typeCountSet;
};
inline RecordType::RecordType()
- : m_typeCountSet(adoptPtr(new TypeCountSet))
+ : m_typeCountSet(std::make_unique<TypeCountSet>())
{
}
return info->className;
}
-inline void RecordType::operator()(JSCell* cell)
+inline IterationStatus RecordType::operator()(JSCell* cell)
{
m_typeCountSet->add(typeName(cell));
+ return IterationStatus::Continue;
}
-inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
+inline std::unique_ptr<TypeCountSet> RecordType::returnValue()
{
- return m_typeCountSet.release();
+ return WTF::move(m_typeCountSet);
}
} // anonymous namespace
Heap::Heap(VM* vm, HeapType heapType)
: m_heapType(heapType)
- , m_ramSize(ramSize())
+ , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
, m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
, m_sizeAfterLastCollect(0)
, m_sizeAfterLastFullCollect(0)
, m_totalBytesVisited(0)
, m_totalBytesCopied(0)
, m_operationInProgress(NoOperation)
- , m_blockAllocator()
, m_objectSpace(this)
, m_storageSpace(this)
- , m_extraMemoryUsage(0)
+ , m_extraMemorySize(0)
+ , m_deprecatedExtraMemorySize(0)
, m_machineThreads(this)
, m_sharedData(vm)
, m_slotVisitor(m_sharedData)
, m_copyVisitor(m_sharedData)
, m_handleSet(vm)
- , m_codeBlocks(m_blockAllocator)
, m_isSafeToCollect(false)
, m_writeBarrierBuffer(256)
, m_vm(vm)
#else
, m_edenActivityCallback(m_fullActivityCallback)
#endif
- , m_sweeper(IncrementalSweeper::create(this))
+#if USE(CF)
+ , m_sweeper(std::make_unique<IncrementalSweeper>(this, CFRunLoopGetCurrent()))
+#else
+ , m_sweeper(std::make_unique<IncrementalSweeper>(this->vm()))
+#endif
, m_deferralDepth(0)
+#if USE(CF)
+ , m_delayedReleaseRecursionCount(0)
+#endif
{
m_storageSpace.init();
+ if (Options::verifyHeap())
+ m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
}
Heap::~Heap()
{
+ for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
+ WeakBlock::destroy(block);
}
bool Heap::isPagedOut(double deadline)
RELEASE_ASSERT(m_operationInProgress == NoOperation);
m_objectSpace.lastChanceToFinalize();
+ releaseDelayedReleasedObjects();
+
+ sweepAllLogicallyEmptyWeakBlocks();
}
-void Heap::reportExtraMemoryCostSlowCase(size_t cost)
+void Heap::releaseDelayedReleasedObjects()
{
- // Our frequency of garbage collection tries to balance memory use against speed
- // by collecting based on the number of newly created values. However, for values
- // that hold on to a great deal of memory that's not in the form of other JS values,
- // that is not good enough - in some cases a lot of those objects can pile up and
- // use crazy amounts of memory without a GC happening. So we track these extra
- // memory costs. Only unusually large objects are noted, and we only keep track
- // of this extra cost until the next GC. In garbage collected languages, most values
- // are either very short lived temporaries, or have extremely long lifetimes. So
- // if a large value survives one garbage collection, there is not much point to
- // collecting more frequently as long as it stays alive.
+#if USE(CF)
+ // We need to guard against the case that releasing an object can create more objects due to the
+ // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
+ // back here and could try to recursively release objects. We guard that with a recursive entry
+ // count. Only the initial call will release objects, recursive calls simple return and let the
+ // the initial call to the function take care of any objects created during release time.
+ // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
+ // and use a temp Vector for the actual releasing.
+ if (!m_delayedReleaseRecursionCount++) {
+ while (!m_delayedReleaseObjects.isEmpty()) {
+ ASSERT(m_vm->currentThreadIsHoldingAPILock());
+
+ Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTF::move(m_delayedReleaseObjects);
+
+ {
+ // We need to drop locks before calling out to arbitrary code.
+ JSLock::DropAllLocks dropAllLocks(m_vm);
+
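+                // Clearing the temporary vector drops the last RetainPtr references, releasing
+                // the delayed objects while no JS locks are held.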
+ objectsToRelease.clear();
+ }
+ }
+ }
+ m_delayedReleaseRecursionCount--;
+#endif
+}
- didAllocate(cost);
+void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
+{
+ didAllocate(size);
collectIfNecessaryOrDefer();
}
+void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
+{
+ m_deprecatedExtraMemorySize += size;
+ reportExtraMemoryAllocatedSlowCase(size);
+}
+
void Heap::reportAbandonedObjectGraph()
{
// Our clients don't know exactly how much memory they
}
}
-void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
-{
- m_tempSortingVectors.append(tempVector);
-}
-
-void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
-{
- ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
- m_tempSortingVectors.removeLast();
-}
-
void Heap::harvestWeakReferences()
{
m_slotVisitor.harvestWeakReferences();
}
}
-void Heap::markRoots(double gcStartTime)
+void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
SamplingRegion samplingRegion("Garbage Collection: Marking");
// We gather conservative roots before clearing mark bits because conservative
// gathering uses the mark bits to determine whether a reference is valid.
- void* dummy;
- ALLOCATE_AND_GET_REGISTER_STATE(registers);
ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
- gatherStackRoots(conservativeRoots, &dummy, registers);
+ gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
gatherJSStackRoots(conservativeRoots);
gatherScratchBufferRoots(conservativeRoots);
- sanitizeStackForVM(m_vm);
-
clearLivenessData();
m_sharedData.didStartMarking();
visitSmallStrings();
visitConservativeRoots(conservativeRoots);
visitProtectedObjects(heapRootVisitor);
- visitTempSortVectors(heapRootVisitor);
visitArgumentBuffers(heapRootVisitor);
visitException(heapRootVisitor);
visitStrongHandles(heapRootVisitor);
void Heap::copyBackingStores()
{
+ GCPHASE(CopyBackingStores);
if (m_operationInProgress == EdenCollection)
m_storageSpace.startedCopying<EdenCollection>();
else {
m_storageSpace.doneCopying();
}
-void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy, MachineThreads::RegisterState& registers)
+void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
GCPHASE(GatherStackRoots);
m_jitStubRoutines.clearMarks();
- m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy, registers);
+ m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
}
void Heap::gatherJSStackRoots(ConservativeRoots& roots)
void Heap::visitSmallStrings()
{
GCPHASE(VisitSmallStrings);
- m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
+ if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
+ return;
+ m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
if (Options::logGC() == GCLogging::Verbose)
dataLog("Small strings:\n", m_slotVisitor);
-
m_slotVisitor.donateAndDrain();
}
m_slotVisitor.donateAndDrain();
}
-void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
-{
- GCPHASE(VisitTempSortVectors);
- typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>*> VectorOfValueStringVectors;
-
- for (auto* vector : m_tempSortingVectors) {
- for (auto& valueStringPair : *vector) {
- if (valueStringPair.first)
- heapRootVisitor.visit(&valueStringPair.first);
- }
- }
-
- if (Options::logGC() == GCLogging::Verbose)
- dataLog("Temp Sort Vectors:\n", m_slotVisitor);
-
- m_slotVisitor.donateAndDrain();
-}
-
void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
{
GCPHASE(MarkingArgumentBuffers);
void Heap::visitException(HeapRootVisitor& visitor)
{
GCPHASE(MarkingException);
- if (!m_vm->exception())
+ if (!m_vm->exception() && !m_vm->lastException())
return;
visitor.visit(m_vm->addressOfException());
+ visitor.visit(m_vm->addressOfLastException());
if (Options::logGC() == GCLogging::Verbose)
dataLog("Exceptions:\n", m_slotVisitor);
{
#if ENABLE(GGC)
GCPHASE(ClearRememberedSet);
- for (auto* cell : rememberedSet) {
- MarkedBlock::blockFor(cell)->clearRemembered(cell);
+ for (auto* cell : rememberedSet)
const_cast<JSCell*>(cell)->setRemembered(false);
- }
#else
UNUSED_PARAM(rememberedSet);
#endif
#endif
dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
}
-
- if (m_operationInProgress == EdenCollection) {
- m_totalBytesVisited += m_slotVisitor.bytesVisited();
- m_totalBytesCopied += m_slotVisitor.bytesCopied();
- } else {
- ASSERT(m_operationInProgress == FullCollection);
- m_totalBytesVisited = m_slotVisitor.bytesVisited();
- m_totalBytesCopied = m_slotVisitor.bytesCopied();
- }
+
+ size_t bytesRemovedFromOldSpaceDueToReallocation =
+ m_storageSpace.takeBytesRemovedFromOldSpaceDueToReallocation();
+
+ if (m_operationInProgress == FullCollection) {
+ m_totalBytesVisited = 0;
+ m_totalBytesCopied = 0;
+ } else
+ m_totalBytesCopied -= bytesRemovedFromOldSpaceDueToReallocation;
+
+ m_totalBytesVisited += m_slotVisitor.bytesVisited();
+ m_totalBytesCopied += m_slotVisitor.bytesCopied();
#if ENABLE(PARALLEL_GC)
m_totalBytesVisited += m_sharedData.childBytesVisited();
m_totalBytesCopied += m_sharedData.childBytesCopied();
return m_objectSpace.objectCount();
}
-size_t Heap::extraSize()
+size_t Heap::extraMemorySize()
{
- return m_extraMemoryUsage + m_arrayBuffers.size();
+ return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
}
size_t Heap::size()
{
- return m_objectSpace.size() + m_storageSpace.size() + extraSize();
+ return m_objectSpace.size() + m_storageSpace.size() + extraMemorySize();
}
size_t Heap::capacity()
{
- return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
+ return m_objectSpace.capacity() + m_storageSpace.capacity() + extraMemorySize();
}
size_t Heap::sizeAfterCollect()
// rather than all used (including dead) copied bytes, thus it's
// always the case that m_totalBytesCopied <= m_storageSpace.size().
ASSERT(m_totalBytesCopied <= m_storageSpace.size());
- return m_totalBytesVisited + m_totalBytesCopied + extraSize();
+ return m_totalBytesVisited + m_totalBytesCopied + extraMemorySize();
}
size_t Heap::protectedGlobalObjectCount()
return forEachProtectedCell<Count>();
}
-PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
return forEachProtectedCell<RecordType>();
}
-PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
{
HeapIterationScope iterationScope(*this);
return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
}
#endif // ENABLE(DFG_JIT)
- for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+ for (ExecutableBase* current : m_compiledCode) {
if (!current->isFunctionExecutable())
continue;
- static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
+ static_cast<FunctionExecutable*>(current)->clearCode();
}
ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
void Heap::deleteAllUnlinkedFunctionCode()
{
- for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+ for (ExecutableBase* current : m_compiledCode) {
if (!current->isFunctionExecutable())
continue;
- static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
+ static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilation();
}
}
void Heap::clearUnmarkedExecutables()
{
GCPHASE(ClearUnmarkedExecutables);
- ExecutableBase* next;
- for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
- next = current->next();
+ for (unsigned i = m_compiledCode.size(); i--;) {
+ ExecutableBase* current = m_compiledCode[i];
if (isMarked(current))
continue;
// We do this because executable memory is limited on some platforms and because
// CodeBlock requires eager finalization.
ExecutableBase::clearCodeVirtual(current);
- m_compiledCode.remove(current);
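+        // Remove without preserving order: swap the dead executable with the last element and
+        // pop, avoiding a linear erase from m_compiledCode.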
+ std::swap(m_compiledCode[i], m_compiledCode.last());
+ m_compiledCode.removeLast();
}
}
ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
if (isRemembered(cell))
return;
- MarkedBlock::blockFor(cell)->setRemembered(cell);
const_cast<JSCell*>(cell)->setRemembered(true);
m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
}
-void Heap::collectAllGarbage()
+void Heap::collectAndSweep(HeapOperation collectionType)
{
if (!m_isSafeToCollect)
return;
- collect(FullCollection);
+ collect(collectionType);
SamplingRegion samplingRegion("Garbage Collection: Sweeping");
- DelayedReleaseScope delayedReleaseScope(m_objectSpace);
+
+ DeferGCForAWhile deferGC(*this);
m_objectSpace.sweep();
m_objectSpace.shrink();
+
+ sweepAllLogicallyEmptyWeakBlocks();
}
static double minute = 60.0;
-void Heap::collect(HeapOperation collectionType)
+NEVER_INLINE void Heap::collect(HeapOperation collectionType)
+{
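+    // Capture a stack-top marker and the register state in this NEVER_INLINE frame so that
+    // collectImpl() can conservatively scan the full stack, including callee-saved registers.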
+ void* stackTop;
+ ALLOCATE_AND_GET_REGISTER_STATE(registers);
+
+ collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers);
+
+ sanitizeStackForVM(m_vm);
+}
+
+NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
#if ENABLE(ALLOCATION_LOGGING)
dataLogF("JSC GC starting collection.\n");
SamplingRegion samplingRegion("Garbage Collection");
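+    // Process any pending type profiler log entries before marking; DeferGCForAWhile prevents
+    // that processing from triggering a collection of its own.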
+ if (vm()->typeProfiler()) {
+ DeferGCForAWhile awhile(*this);
+ vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
+ }
+
RELEASE_ASSERT(!m_deferralDepth);
ASSERT(vm()->currentThreadIsHoldingAPILock());
RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
GCPHASE(Collect);
double gcStartTime = WTF::monotonicallyIncreasingTime();
+ if (m_verifier) {
+ // Verify that live objects from the last GC cycle haven't been corrupted by
+ // mutators before we begin this new GC cycle.
+ m_verifier->verify(HeapVerifier::Phase::BeforeGC);
+
+ m_verifier->initializeGCCycle();
+ m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
+ }
deleteOldCode(gcStartTime);
flushOldStructureIDTables();
stopAllocation();
flushWriteBarrierBuffer();
- markRoots(gcStartTime);
+ markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);
+ if (m_verifier) {
+ m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
+ m_verifier->verify(HeapVerifier::Phase::AfterMarking);
+ }
JAVASCRIPTCORE_GC_MARKED();
+ if (vm()->typeProfiler())
+ vm()->typeProfiler()->invalidateTypeSetCache();
+
reapWeakHandles();
+ pruneStaleEntriesFromWeakGCMaps();
sweepArrayBuffers();
snapshotMarkedSpace();
didFinishCollection(gcStartTime);
resumeCompilerThreads();
+ if (m_verifier) {
+ m_verifier->trimDeadObjects();
+ m_verifier->verify(HeapVerifier::Phase::AfterGC);
+ }
+
if (Options::logGC()) {
double after = currentTimeMS();
dataLog(after - before, " ms]\n");
}
if (m_operationInProgress == FullCollection) {
m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
- m_extraMemoryUsage = 0;
+ m_extraMemorySize = 0;
+ m_deprecatedExtraMemorySize = 0;
if (m_fullActivityCallback)
m_fullActivityCallback->willCollect();
m_objectSpace.reapWeakSets();
}
+void Heap::pruneStaleEntriesFromWeakGCMaps()
+{
+ GCPHASE(PruningStaleEntriesFromWeakGCMaps);
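+    // Each registered WeakGCMap supplies a callback that drops its dead entries; pruning is
+    // only performed after full collections.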
+ if (m_operationInProgress != FullCollection)
+ return;
+ for (auto& pruneCallback : m_weakGCMaps.values())
+ pruneCallback();
+}
+
void Heap::sweepArrayBuffers()
{
GCPHASE(SweepingArrayBuffers);
void Heap::snapshotMarkedSpace()
{
GCPHASE(SnapshotMarkedSpace);
- if (m_operationInProgress != FullCollection)
- return;
- m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
- MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
- m_objectSpace.forEachBlock(functor);
+ if (m_operationInProgress == EdenCollection) {
+ m_blockSnapshot.appendVector(m_objectSpace.blocksWithNewObjects());
+ // Sort and deduplicate the block snapshot since we might be appending to an unfinished work list.
+ std::sort(m_blockSnapshot.begin(), m_blockSnapshot.end());
+ m_blockSnapshot.shrink(std::unique(m_blockSnapshot.begin(), m_blockSnapshot.end()) - m_blockSnapshot.begin());
+ } else {
+ m_blockSnapshot.resizeToFit(m_objectSpace.blocks().set().size());
+ MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
+ m_objectSpace.forEachBlock(functor);
+ }
}
void Heap::deleteSourceProviderCaches()
void Heap::notifyIncrementalSweeper()
{
GCPHASE(NotifyIncrementalSweeper);
- if (m_operationInProgress != FullCollection)
- return;
- m_sweeper->startSweeping(m_blockSnapshot);
+
+ if (m_operationInProgress == FullCollection) {
+ if (!m_logicallyEmptyWeakBlocks.isEmpty())
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+ }
+
+ m_sweeper->startSweeping();
}
void Heap::rememberCurrentlyExecutingCodeBlocks()
if (Options::recordGCPauseTimes())
HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
- RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
-
- m_operationInProgress = NoOperation;
- JAVASCRIPTCORE_GC_END();
if (Options::useZombieMode())
zombifyDeadObjects();
if (Options::logGC() == GCLogging::Verbose)
GCLogging::dumpObjectGraph(this);
+
+ RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
+ m_operationInProgress = NoOperation;
+ JAVASCRIPTCORE_GC_END();
}
void Heap::resumeCompilerThreads()
return m_edenActivityCallback.get();
}
-void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
+void Heap::setIncrementalSweeper(std::unique_ptr<IncrementalSweeper> sweeper)
{
- m_sweeper = sweeper;
+ m_sweeper = WTF::move(sweeper);
}
IncrementalSweeper* Heap::sweeper()
m_compiledCode.append(executable);
}
+void Heap::collectAllGarbageIfNotDoneRecently()
+{
+ if (!m_fullActivityCallback) {
+ collectAllGarbage();
+ return;
+ }
+
+ if (m_fullActivityCallback->didSyncGCRecently()) {
+        // A synchronous GC was already requested recently, so we merely accelerate the next collection.
+ reportAbandonedObjectGraph();
+ return;
+ }
+
+ m_fullActivityCallback->setDidSyncGCRecently();
+ collectAllGarbage();
+}
+
class Zombify : public MarkedBlock::VoidFunctor {
public:
- void operator()(JSCell* cell)
+ inline void visit(JSCell* cell)
{
void** current = reinterpret_cast<void**>(cell);
for (; current < limit; current++)
*current = zombifiedBits;
}
+ IterationStatus operator()(JSCell* cell)
+ {
+ visit(cell);
+ return IterationStatus::Continue;
+ }
};
void Heap::zombifyDeadObjects()
// Sweep now because destructors will crash once we're zombified.
{
SamplingRegion samplingRegion("Garbage Collection: Sweeping");
- DelayedReleaseScope delayedReleaseScope(m_objectSpace);
m_objectSpace.zombifySweep();
}
HeapIterationScope iterationScope(*this);
#endif
}
+void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
+{
+ m_logicallyEmptyWeakBlocks.append(block);
+}
+
+void Heap::sweepAllLogicallyEmptyWeakBlocks()
+{
+ if (m_logicallyEmptyWeakBlocks.isEmpty())
+ return;
+
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+ while (sweepNextLogicallyEmptyWeakBlock()) { }
+}
+
+bool Heap::sweepNextLogicallyEmptyWeakBlock()
+{
+ if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
+ return false;
+
+ WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
+
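+    // Destroy the block if sweeping leaves it completely empty (swap-and-pop keeps removal O(1));
+    // otherwise keep it in the list and advance the sweep cursor past it.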
+ block->sweep();
+ if (block->isEmpty()) {
+ std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
+ m_logicallyEmptyWeakBlocks.removeLast();
+ WeakBlock::destroy(block);
+ } else
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
+
+ if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
+ return false;
+ }
+
+ return true;
+}
+
} // namespace JSC