/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "DFGWorklist.h"
#include "EdenGCActivityCallback.h"
#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
#include "GCIncomingRefCountedSetInlines.h"
#include "HeapIterationScope.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "HeapVerifier.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSONObject.h"
#include "JSCInlines.h"
#include "JSVirtualMachineInternal.h"
#include "RecursiveAllocationScope.h"
#include "TypeProfilerLog.h"
#include "UnlinkedCodeBlock.h"
#include "WeakSetInlines.h"
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
#include <wtf/ProcessID.h>

using namespace std;

namespace JSC {

namespace {
static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
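// Note that minHeapSize() below caps the LargeHeap trigger at ramSize / 4, so the full
// 32 MB budget only applies on devices with at least 128 MB of RAM.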
#define ENABLE_GC_LOGGING 0

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)
struct GCTimer {
    GCTimer(const char* name)
        : name(name)
    {
    }
    ~GCTimer()
    {
        logData(allCollectionData, "(All)");
        logData(edenCollectionData, "(Eden)");
        logData(fullCollectionData, "(Full)");
    }

    struct TimeRecord {
        TimeRecord()
            : time(0)
            , min(std::numeric_limits<double>::infinity())
            , max(0)
            , count(0)
        {
        }

        double time;
        double min;
        double max;
        size_t count;
    };

    void logData(const TimeRecord& data, const char* extra)
    {
        dataLogF("[%d] %s (Parent: %s) %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
            getCurrentProcessID(),
            name,
            parent ? parent->name : "nullptr",
            extra,
            data.time * 1000,
            data.time * 1000 / data.count,
            data.min * 1000,
            data.max * 1000,
            data.count);
    }

    void updateData(TimeRecord& data, double duration)
    {
        if (duration < data.min)
            data.min = duration;
        if (duration > data.max)
            data.max = duration;
        data.count++;
        data.time += duration;
    }

    void didFinishPhase(HeapOperation collectionType, double duration)
    {
        TimeRecord& data = collectionType == EdenCollection ? edenCollectionData : fullCollectionData;
        updateData(data, duration);
        updateData(allCollectionData, duration);
    }

    static GCTimer* s_currentGlobalTimer;

    TimeRecord allCollectionData;
    TimeRecord fullCollectionData;
    TimeRecord edenCollectionData;
    const char* name;
    GCTimer* parent { nullptr };
};

GCTimer* GCTimer::s_currentGlobalTimer = nullptr;
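// GCTimerScope is an RAII helper: GCPHASE() (defined below) instantiates one per phase, and
// the s_currentGlobalTimer chain lets a nested phase report its parent phase in logData().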
struct GCTimerScope {
    GCTimerScope(GCTimer& timer, HeapOperation collectionType)
        : timer(timer)
        , start(WTF::monotonicallyIncreasingTime())
        , collectionType(collectionType)
    {
        timer.parent = GCTimer::s_currentGlobalTimer;
        GCTimer::s_currentGlobalTimer = &timer;
    }
    ~GCTimerScope()
    {
        double delta = WTF::monotonicallyIncreasingTime() - start;
        timer.didFinishPhase(collectionType, delta);
        GCTimer::s_currentGlobalTimer = timer.parent;
    }
    GCTimer& timer;
    double start;
    HeapOperation collectionType;
};

struct GCCounter {
    GCCounter(const char* name)
        : name(name)
        , count(0)
        , total(0)
        , min(10000000)
        , max(0)
    {
    }

    void add(size_t amount)
    {
        count++;
        total += amount;
        if (amount < min)
            min = amount;
        if (amount > max)
            max = amount;
    }

    ~GCCounter()
    {
        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), name, total, total / count, min, max);
    }

    const char* name;
    size_t count;
    size_t total;
    size_t min;
    size_t max;
};
#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(name##Timer, m_operationInProgress)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.add(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)

#endif // ENABLE(GC_LOGGING)
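// Example usage (as seen throughout the collector below): "GCPHASE(VisitProtectedObjects);"
// defines a static GCTimer named VisitProtectedObjectsTimer plus a GCTimerScope that charges
// the enclosing block's wall time to it, and "GCCOUNTER(VisitedValueCount, n)" accumulates n
// into a static GCCounter. Both expand to no-ops unless GC logging is enabled above.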
static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}
static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
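    // For example, with 1 GB of RAM: a 200 MB post-collection heap gets a 400 MB limit,
    // a 300 MB heap gets a 450 MB limit, and anything at or above 512 MB only grows by 25%.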
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}
static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->currentThreadIsHoldingAPILock();
}
static inline bool isValidThreadState(VM* vm)
{
    if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}
struct MarkObject : public MarkedBlock::VoidFunctor {
    inline void visit(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
    IterationStatus operator()(JSCell* cell)
    {
        visit(cell);
        return IterationStatus::Continue;
    }
};
struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};
struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    inline void visit(JSCell* cell)
    {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
    IterationStatus operator()(JSCell* cell)
    {
        visit(cell);
        return IterationStatus::Continue;
    }
};
class RecordType {
public:
    typedef std::unique_ptr<TypeCountSet> ReturnType;

    RecordType();
    IterationStatus operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    std::unique_ptr<TypeCountSet> m_typeCountSet;
};
inline RecordType::RecordType()
    : m_typeCountSet(std::make_unique<TypeCountSet>())
{
}
inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}
inline IterationStatus RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
    return IterationStatus::Continue;
}
inline std::unique_ptr<TypeCountSet> RecordType::returnValue()
{
    return WTF::move(m_typeCountSet);
}
} // anonymous namespace
Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_sizeAfterLastFullCollect(0)
    , m_sizeBeforeLastFullCollect(0)
    , m_sizeAfterLastEdenCollect(0)
    , m_sizeBeforeLastEdenCollect(0)
    , m_bytesAllocatedThisCycle(0)
    , m_bytesAbandonedSinceLastFullCollect(0)
    , m_maxEdenSize(m_minBytesPerCycle)
    , m_maxHeapSize(m_minBytesPerCycle)
    , m_shouldDoFullCollection(false)
    , m_totalBytesVisited(0)
    , m_totalBytesCopied(0)
    , m_operationInProgress(NoOperation)
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_extraMemorySize(0)
    , m_deprecatedExtraMemorySize(0)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_isSafeToCollect(false)
    , m_writeBarrierBuffer(256)
    // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
    // schedule the timer if we've never done a collection.
    , m_lastFullGCLength(0.01)
    , m_lastEdenGCLength(0.01)
    , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
    , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
#if ENABLE(GGC)
    , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
#else
    , m_edenActivityCallback(m_fullActivityCallback)
#endif
#if USE(CF)
    , m_sweeper(std::make_unique<IncrementalSweeper>(this, CFRunLoopGetCurrent()))
#else
    , m_sweeper(std::make_unique<IncrementalSweeper>(this->vm()))
#endif
    , m_delayedReleaseRecursionCount(0)
{
    m_storageSpace.init();
    if (Options::verifyHeap())
        m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
}
Heap::~Heap()
{
    for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
        WeakBlock::destroy(block);
}
bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}
// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->entryScope);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();
    releaseDelayedReleasedObjects();

    sweepAllLogicallyEmptyWeakBlocks();
}
void Heap::releaseDelayedReleasedObjects()
{
    // We need to guard against the case that releasing an object can create more objects due to the
    // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
    // back here and could try to recursively release objects. We guard that with a recursive entry
    // count. Only the initial call will release objects, recursive calls simply return and let the
    // initial call to the function take care of any objects created during release time.
    // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
    // and use a temp Vector for the actual releasing.
    if (!m_delayedReleaseRecursionCount++) {
        while (!m_delayedReleaseObjects.isEmpty()) {
            ASSERT(m_vm->currentThreadIsHoldingAPILock());

            Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTF::move(m_delayedReleaseObjects);

            {
                // We need to drop locks before calling out to arbitrary code.
                JSLock::DropAllLocks dropAllLocks(m_vm);

                objectsToRelease.clear();
            }
        }
    }
    m_delayedReleaseRecursionCount--;
}
void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
{
    m_extraMemorySize += size;
    collectIfNecessaryOrDefer();
}
void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
{
    m_deprecatedExtraMemorySize += size;
    reportExtraMemoryAllocatedSlowCase(size);
}
void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.1 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}
void Heap::didAbandon(size_t bytes)
{
    if (m_fullActivityCallback) {
        m_fullActivityCallback->didAllocate(
            m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    }
    m_bytesAbandonedSinceLastFullCollect += bytes;
}
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}
bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}
void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
{
    if (m_arrayBuffers.addReference(cell, buffer)) {
        collectIfNecessaryOrDefer();
        didAllocate(buffer->gcSizeEstimateInBytes());
    }
}
void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}
void Heap::finalizeUnconditionalFinalizers()
{
    GCPHASE(FinalizeUnconditionalFinalizers);
    m_slotVisitor.finalizeUnconditionalFinalizers();
}
inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}
void Heap::willStartIterating()
{
    m_objectSpace.willStartIterating();
}
void Heap::didFinishIterating()
{
    m_objectSpace.didFinishIterating();
}
void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        registerRoots[i]->setMarked();
        roots.add(registerRoots[i]);
    }
}
void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
    SamplingRegion samplingRegion("Garbage Collection: Marking");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(GGC)
    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
    m_slotVisitor.markStack().fillVector(rememberedSet);
#else
    Vector<const JSCell*> rememberedSet;
#endif

    if (m_operationInProgress == EdenCollection)
        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
    else
        m_codeBlocks.clearMarksForFullCollection();

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
    gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
    gatherJSStackRoots(conservativeRoots);
    gatherScratchBufferRoots(conservativeRoots);

    clearLivenessData();

    m_sharedData.didStartMarking();
    m_slotVisitor.didStartMarking();
    HeapRootVisitor heapRootVisitor(m_slotVisitor);

    {
        ParallelModeEnabler enabler(m_slotVisitor);

        visitExternalRememberedSet();
        visitSmallStrings();
        visitConservativeRoots(conservativeRoots);
        visitProtectedObjects(heapRootVisitor);
        visitArgumentBuffers(heapRootVisitor);
        visitException(heapRootVisitor);
        visitStrongHandles(heapRootVisitor);
        visitHandleStack(heapRootVisitor);
        traceCodeBlocksAndJITStubRoutines();
        converge();
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    visitWeakHandles(heapRootVisitor);

    clearRememberedSet(rememberedSet);
    m_sharedData.didFinishMarking();
    updateObjectCounts(gcStartTime);
    resetVisitors();
}
void Heap::copyBackingStores()
{
    GCPHASE(CopyBackingStores);
    if (m_operationInProgress == EdenCollection)
        m_storageSpace.startedCopying<EdenCollection>();
    else {
        ASSERT(m_operationInProgress == FullCollection);
        m_storageSpace.startedCopying<FullCollection>();
    }

    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}
void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
    GCPHASE(GatherStackRoots);
    m_jitStubRoutines.clearMarks();
    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
}
void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
    GCPHASE(GatherJSStackRoots);
    stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
}
void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
    GCPHASE(GatherScratchBufferRoots);
    m_vm->gatherConservativeRoots(roots);
}
void Heap::clearLivenessData()
{
    GCPHASE(ClearLivenessData);
    m_objectSpace.clearNewlyAllocated();
    m_objectSpace.clearMarks();
}
void Heap::visitExternalRememberedSet()
{
#if JSC_OBJC_API_ENABLED
    scanExternalRememberedSet(*m_vm, m_slotVisitor);
#endif
}
void Heap::visitSmallStrings()
{
    GCPHASE(VisitSmallStrings);
    if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
        return;

    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Small strings:\n", m_slotVisitor);
    m_slotVisitor.donateAndDrain();
}
void Heap::visitConservativeRoots(ConservativeRoots& roots)
{
    GCPHASE(VisitConservativeRoots);
    m_slotVisitor.append(roots);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Conservative Roots:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitCompilerWorklistWeakReferences()
{
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("DFG Worklists:\n", m_slotVisitor);
}
void Heap::removeDeadCompilerWorklistEntries()
{
    GCPHASE(FinalizeDFGWorklists);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->removeDeadPlans(*m_vm);
}
void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitProtectedObjects);

    for (auto& pair : m_protectedValues)
        heapRootVisitor.visit(&pair.key);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Protected Objects:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingArgumentBuffers);
    if (!m_markListSet || !m_markListSet->size())
        return;

    MarkedArgumentBuffer::markLists(visitor, *m_markListSet);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Argument Buffers:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitException(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingException);
    if (!m_vm->exception() && !m_vm->lastException())
        return;

    visitor.visit(m_vm->addressOfException());
    visitor.visit(m_vm->addressOfLastException());

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Exceptions:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitStrongHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitStrongHandles);
    m_handleSet.visitStrongHandles(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Strong Handles:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitHandleStack(HeapRootVisitor& visitor)
{
    GCPHASE(VisitHandleStack);
    m_handleStack.visit(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Handle Stack:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::traceCodeBlocksAndJITStubRoutines()
{
    GCPHASE(TraceCodeBlocksAndJITStubRoutines);
    m_codeBlocks.traceMarked(m_slotVisitor);
    m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::converge()
{
#if ENABLE(PARALLEL_GC)
    GCPHASE(Convergence);
    m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
}
void Heap::visitWeakHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitingLiveWeakHandles);
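    // Visiting weak sets can make more objects strongly reachable, which in turn can keep
    // additional weak handles alive, so this runs as a fixed point: repeat until the
    // visitor has no more work.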
    while (true) {
        m_objectSpace.visitWeakSets(visitor);
        harvestWeakReferences();
        visitCompilerWorklistWeakReferences();
        m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
        if (m_slotVisitor.isEmpty())
            break;

        if (Options::logGC() == GCLogging::Verbose)
            dataLog("Live Weak Handles:\n", m_slotVisitor);

        {
            ParallelModeEnabler enabler(m_slotVisitor);
            m_slotVisitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
            m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
        }
    }
}
void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
{
#if ENABLE(GGC)
    GCPHASE(ClearRememberedSet);
    for (auto* cell : rememberedSet)
        const_cast<JSCell*>(cell)->setRemembered(false);
#else
    UNUSED_PARAM(rememberedSet);
#endif
}
void Heap::updateObjectCounts(double gcStartTime)
{
    GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());

    if (Options::logGC() == GCLogging::Verbose) {
        size_t visitCount = m_slotVisitor.visitCount();
#if ENABLE(PARALLEL_GC)
        visitCount += m_sharedData.childVisitCount();
#endif
        dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
    }

    size_t bytesRemovedFromOldSpaceDueToReallocation =
        m_storageSpace.takeBytesRemovedFromOldSpaceDueToReallocation();

    if (m_operationInProgress == FullCollection) {
        m_totalBytesVisited = 0;
        m_totalBytesCopied = 0;
    } else
        m_totalBytesCopied -= bytesRemovedFromOldSpaceDueToReallocation;

    m_totalBytesVisited += m_slotVisitor.bytesVisited();
    m_totalBytesCopied += m_slotVisitor.bytesCopied();
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif
}
void Heap::resetVisitors()
{
    m_slotVisitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}
size_t Heap::extraMemorySize()
{
    return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
}
size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size() + extraMemorySize();
}
size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraMemorySize();
}
size_t Heap::sizeAfterCollect()
{
    // The result here may not agree with the normal Heap::size().
    // This is due to the fact that we only count live copied bytes
    // rather than all used (including dead) copied bytes, thus it's
    // always the case that m_totalBytesCopied <= m_storageSpace.size().
    ASSERT(m_totalBytesCopied <= m_storageSpace.size());
    return m_totalBytesVisited + m_totalBytesCopied + extraMemorySize();
}
size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}
size_t Heap::globalObjectCount()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
}
size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}
std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}
std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
}
void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->entryScope)
        return;

    // If we have things on any worklist, then don't delete code. This is kind of
    // a weird heuristic. It's definitely not safe to throw away code that is on
    // the worklist. But this change was made in a hurry so we just avoid throwing
    // away any code if there is any code on any worklist. I suspect that this
    // might not actually be too dumb: if there is code on worklists then that
    // means that we are running some hot JS code right now. Maybe causing
    // recompilations isn't a good idea.
#if ENABLE(DFG_JIT)
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            if (worklist->isActiveForVM(*vm()))
                return;
        }
    }
#endif // ENABLE(DFG_JIT)

    for (ExecutableBase* current : m_compiledCode) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCode();
    }

    ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
    m_codeBlocks.clearMarksForFullCollection();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
}
void Heap::deleteAllUnlinkedFunctionCode()
{
    for (ExecutableBase* current : m_compiledCode) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilation();
    }
}
void Heap::clearUnmarkedExecutables()
{
    GCPHASE(ClearUnmarkedExecutables);
    for (unsigned i = m_compiledCode.size(); i--;) {
        ExecutableBase* current = m_compiledCode[i];
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        std::swap(m_compiledCode[i], m_compiledCode.last());
        m_compiledCode.removeLast();
    }
}
void Heap::deleteUnmarkedCompiledCode()
{
    GCPHASE(DeleteCodeBlocks);
    clearUnmarkedExecutables();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}
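// Slow path of the generational write barrier: a cell that has already been marked and then
// has a new pointer stored into it is appended to the remembered set so the next eden
// collection revisits it (clearRememberedSet() above resets the flag once marking finishes).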
void Heap::addToRememberedSet(const JSCell* cell)
{
    ASSERT(cell);
    ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
    if (isRemembered(cell))
        return;
    const_cast<JSCell*>(cell)->setRemembered(true);
    m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
}
void Heap::collectAndSweep(HeapOperation collectionType)
{
    if (!m_isSafeToCollect)
        return;

    collect(collectionType);

    SamplingRegion samplingRegion("Garbage Collection: Sweeping");

    DeferGCForAWhile deferGC(*this);
    m_objectSpace.sweep();
    m_objectSpace.shrink();

    sweepAllLogicallyEmptyWeakBlocks();
}
static double minute = 60.0;
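// deleteOldCode() below uses this to throttle whole-heap code discard: compiled code is only
// thrown away if at least a minute has passed since the last discard.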
NEVER_INLINE void Heap::collect(HeapOperation collectionType)
{
    void* stackTop;
    ALLOCATE_AND_GET_REGISTER_STATE(registers);

    collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers);

    sanitizeStackForVM(m_vm);
}
NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif

    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC: ");
        before = currentTimeMS();
    }

    SamplingRegion samplingRegion("Garbage Collection");

    if (vm()->typeProfiler()) {
        DeferGCForAWhile awhile(*this);
        vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
    }

    RELEASE_ASSERT(!m_deferralDepth);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    suspendCompilerThreads();
    willStartCollection(collectionType);

    double gcStartTime = WTF::monotonicallyIncreasingTime();
    if (m_verifier) {
        // Verify that live objects from the last GC cycle haven't been corrupted by
        // mutators before we begin this new GC cycle.
        m_verifier->verify(HeapVerifier::Phase::BeforeGC);

        m_verifier->initializeGCCycle();
        m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
    }

    deleteOldCode(gcStartTime);
    flushOldStructureIDTables();
    stopAllocation();
    flushWriteBarrierBuffer();

    markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);

    if (m_verifier) {
        m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
        m_verifier->verify(HeapVerifier::Phase::AfterMarking);
    }
    JAVASCRIPTCORE_GC_MARKED();

    if (vm()->typeProfiler())
        vm()->typeProfiler()->invalidateTypeSetCache();

    reapWeakHandles();
    pruneStaleEntriesFromWeakGCMaps();
    sweepArrayBuffers();
    snapshotMarkedSpace();

    copyBackingStores();

    finalizeUnconditionalFinalizers();
    removeDeadCompilerWorklistEntries();
    deleteUnmarkedCompiledCode();
    deleteSourceProviderCaches();
    notifyIncrementalSweeper();
    rememberCurrentlyExecutingCodeBlocks();

    resetAllocators();
    updateAllocationLimits();
    didFinishCollection(gcStartTime);
    resumeCompilerThreads();

    if (m_verifier) {
        m_verifier->trimDeadObjects();
        m_verifier->verify(HeapVerifier::Phase::AfterGC);
    }

    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms]\n");
    }
}
void Heap::suspendCompilerThreads()
{
    GCPHASE(SuspendCompilerThreads);
    ASSERT(m_suspendedCompilerWorklists.isEmpty());
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            m_suspendedCompilerWorklists.append(worklist);
            worklist->suspendAllThreads();
        }
    }
}
)
1111 GCPHASE(StartingCollection
);
1112 if (shouldDoFullCollection(collectionType
)) {
1113 m_operationInProgress
= FullCollection
;
1114 m_slotVisitor
.clearMarkStack();
1115 m_shouldDoFullCollection
= false;
1116 if (Options::logGC())
1117 dataLog("FullCollection, ");
1119 m_operationInProgress
= EdenCollection
;
1120 if (Options::logGC())
1121 dataLog("EdenCollection, ");
1123 if (m_operationInProgress
== FullCollection
) {
1124 m_sizeBeforeLastFullCollect
= m_sizeAfterLastCollect
+ m_bytesAllocatedThisCycle
;
1125 m_extraMemorySize
= 0;
1126 m_deprecatedExtraMemorySize
= 0;
1128 if (m_fullActivityCallback
)
1129 m_fullActivityCallback
->willCollect();
1131 ASSERT(m_operationInProgress
== EdenCollection
);
1132 m_sizeBeforeLastEdenCollect
= m_sizeAfterLastCollect
+ m_bytesAllocatedThisCycle
;
1135 if (m_edenActivityCallback
)
1136 m_edenActivityCallback
->willCollect();
void Heap::deleteOldCode(double gcStartTime)
{
    if (m_operationInProgress == EdenCollection)
        return;

    GCPHASE(DeleteOldCode);
    if (gcStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }
}
void Heap::flushOldStructureIDTables()
{
    GCPHASE(FlushOldStructureIDTables);
    m_structureIDTable.flushOldTables();
}
void Heap::flushWriteBarrierBuffer()
{
    GCPHASE(FlushWriteBarrierBuffer);
    if (m_operationInProgress == EdenCollection) {
        m_writeBarrierBuffer.flush(*this);
        return;
    }
    m_writeBarrierBuffer.reset();
}
void Heap::stopAllocation()
{
    GCPHASE(StopAllocation);
    m_objectSpace.stopAllocating();
    if (m_operationInProgress == FullCollection)
        m_storageSpace.didStartFullCollection();
}
void Heap::reapWeakHandles()
{
    GCPHASE(ReapingWeakHandles);
    m_objectSpace.reapWeakSets();
}
void Heap::pruneStaleEntriesFromWeakGCMaps()
{
    GCPHASE(PruningStaleEntriesFromWeakGCMaps);
    if (m_operationInProgress != FullCollection)
        return;
    for (auto& pruneCallback : m_weakGCMaps.values())
        pruneCallback();
}
void Heap::sweepArrayBuffers()
{
    GCPHASE(SweepingArrayBuffers);
    m_arrayBuffers.sweep();
}
struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
    MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
        : m_index(0)
        , m_blocks(blocks)
    {
    }

    void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }

    size_t m_index;
    Vector<MarkedBlock*>& m_blocks;
};
void Heap::snapshotMarkedSpace()
{
    GCPHASE(SnapshotMarkedSpace);

    if (m_operationInProgress == EdenCollection) {
        m_blockSnapshot.appendVector(m_objectSpace.blocksWithNewObjects());
        // Sort and deduplicate the block snapshot since we might be appending to an unfinished work list.
        std::sort(m_blockSnapshot.begin(), m_blockSnapshot.end());
        m_blockSnapshot.shrink(std::unique(m_blockSnapshot.begin(), m_blockSnapshot.end()) - m_blockSnapshot.begin());
    } else {
        m_blockSnapshot.resizeToFit(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }
}
void Heap::deleteSourceProviderCaches()
{
    GCPHASE(DeleteSourceProviderCaches);
    m_vm->clearSourceProviderCaches();
}
void Heap::notifyIncrementalSweeper()
{
    GCPHASE(NotifyIncrementalSweeper);

    if (m_operationInProgress == FullCollection) {
        if (!m_logicallyEmptyWeakBlocks.isEmpty())
            m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
    }

    m_sweeper->startSweeping();
}
void Heap::rememberCurrentlyExecutingCodeBlocks()
{
    GCPHASE(RememberCurrentlyExecutingCodeBlocks);
    m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
}
void Heap::resetAllocators()
{
    GCPHASE(ResetAllocators);
    m_objectSpace.resetAllocators();
}
void Heap::updateAllocationLimits()
{
    GCPHASE(UpdateAllocationLimits);
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    if (m_operationInProgress == FullCollection) {
        // To avoid pathological GC churn in very small and very large heaps, we set
        // the new allocation limit based on the current size of the heap, with a
        // fixed minimum.
        m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastFullCollect = currentHeapSize;
        m_bytesAbandonedSinceLastFullCollect = 0;
    } else {
        ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastEdenCollect = currentHeapSize;
        double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
        double minEdenToOldGenerationRatio = 1.0 / 3.0;
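        // For example, with m_maxHeapSize at 120 MB, an eden cycle that leaves 90 MB of live
        // heap shrinks the eden budget to 30 MB (a ratio of 0.25), so the next collection is
        // promoted to a full collection.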
        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
            m_shouldDoFullCollection = true;
        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (m_fullActivityCallback) {
            ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
            m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
        }
    }

    m_sizeAfterLastCollect = currentHeapSize;
    m_bytesAllocatedThisCycle = 0;

    if (Options::logGC())
        dataLog(currentHeapSize / 1024, " kb, ");
}
void Heap::didFinishCollection(double gcStartTime)
{
    GCPHASE(FinishingCollection);
    double gcEndTime = WTF::monotonicallyIncreasingTime();
    if (m_operationInProgress == FullCollection)
        m_lastFullGCLength = gcEndTime - gcStartTime;
    else
        m_lastEdenGCLength = gcEndTime - gcStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);

    if (Options::logGC() == GCLogging::Verbose)
        GCLogging::dumpObjectGraph(this);

    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();
}
void Heap::resumeCompilerThreads()
{
    GCPHASE(ResumeCompilerThreads);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->resumeAllThreads();
    m_suspendedCompilerWorklists.clear();
}
void Heap::markDeadObjects()
{
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
}
void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
{
    m_fullActivityCallback = activityCallback;
}
void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
{
    m_edenActivityCallback = activityCallback;
}
GCActivityCallback* Heap::fullActivityCallback()
{
    return m_fullActivityCallback.get();
}
GCActivityCallback* Heap::edenActivityCallback()
{
    return m_edenActivityCallback.get();
}
void Heap::setIncrementalSweeper(std::unique_ptr<IncrementalSweeper> sweeper)
{
    m_sweeper = WTF::move(sweeper);
}
IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}
void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
    if (m_fullActivityCallback)
        m_fullActivityCallback->setEnabled(enable);
    if (m_edenActivityCallback)
        m_edenActivityCallback->setEnabled(enable);
}
void Heap::didAllocate(size_t bytes)
{
    if (m_edenActivityCallback)
        m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    m_bytesAllocatedThisCycle += bytes;
}
bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}
void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}
void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}
void Heap::collectAllGarbageIfNotDoneRecently()
{
    if (!m_fullActivityCallback) {
        collectAllGarbage();
        return;
    }

    if (m_fullActivityCallback->didSyncGCRecently()) {
        // A synchronous GC was already requested recently so we merely accelerate next collection.
        reportAbandonedObjectGraph();
        return;
    }

    m_fullActivityCallback->setDidSyncGCRecently();
    collectAllGarbage();
}
class Zombify : public MarkedBlock::VoidFunctor {
public:
    inline void visit(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = zombifiedBits;
    }
    IterationStatus operator()(JSCell* cell)
    {
        visit(cell);
        return IterationStatus::Continue;
    }
};
1453 // Sweep now because destructors will crash once we're zombified.
1455 SamplingRegion
samplingRegion("Garbage Collection: Sweeping");
1456 m_objectSpace
.zombifySweep();
1458 HeapIterationScope
iterationScope(*this);
1459 m_objectSpace
.forEachDeadCell
<Zombify
>(iterationScope
);
void Heap::flushWriteBarrierBuffer(JSCell* cell)
{
    m_writeBarrierBuffer.flush(*this);
    m_writeBarrierBuffer.add(cell);
}
bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
{
#if ENABLE(GGC)
    if (Options::alwaysDoFullCollection())
        return true;

    switch (requestedCollectionType) {
    case EdenCollection:
        return false;
    case FullCollection:
        return true;
    case AnyCollection:
        return m_shouldDoFullCollection;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return false;
#else
    UNUSED_PARAM(requestedCollectionType);
    return true;
#endif
}
void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
{
    m_logicallyEmptyWeakBlocks.append(block);
}
void Heap::sweepAllLogicallyEmptyWeakBlocks()
{
    if (m_logicallyEmptyWeakBlocks.isEmpty())
        return;

    m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
    while (sweepNextLogicallyEmptyWeakBlock()) { }
}
bool Heap::sweepNextLogicallyEmptyWeakBlock()
{
    if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
        return false;

    WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];

    block->sweep();
    if (block->isEmpty()) {
        std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
        m_logicallyEmptyWeakBlocks.removeLast();
        WeakBlock::destroy(block);
    } else
        m_indexOfNextLogicallyEmptyWeakBlockToSweep++;

    if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
        m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
        return false;
    }
    return true;
}