/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "DFGWorklist.h"
#include "DelayedReleaseScope.h"
#include "EdenGCActivityCallback.h"
#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
#include "GCIncomingRefCountedSetInlines.h"
#include "HeapIterationScope.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSONObject.h"
#include "JSCInlines.h"
#include "JSVirtualMachineInternal.h"
#include "RecursiveAllocationScope.h"
#include "UnlinkedCodeBlock.h"
#include "WeakSetInlines.h"
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
#include <wtf/ProcessID.h>
using namespace std;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
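
// When GC logging is enabled below, GCPHASE() times each phase of a collection with a
// GCTimer/GCTimerScope pair and GCCOUNTER() accumulates per-collection statistics; both
// compile to no-ops otherwise.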
#define ENABLE_GC_LOGGING 0

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)
struct GCTimer {
    GCTimer(const char* name)
        : m_name(name)
    {
    }

    ~GCTimer()
    {
        logData(m_allCollectionData, "(All)");
        logData(m_edenCollectionData, "(Eden)");
        logData(m_fullCollectionData, "(Full)");
    }

    struct TimeRecord {
        TimeRecord()
            : m_time(0)
            , m_min(std::numeric_limits<double>::infinity())
            , m_max(0)
            , m_count(0)
        {
        }

        double m_time;
        double m_min;
        double m_max;
        size_t m_count;
    };

    void logData(const TimeRecord& data, const char* extra)
    {
        dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
            getCurrentProcessID(),
            m_name, extra,
            data.m_time * 1000,
            data.m_time * 1000 / data.m_count,
            data.m_min * 1000,
            data.m_max * 1000,
            data.m_count);
    }

    void updateData(TimeRecord& data, double duration)
    {
        if (duration < data.m_min)
            data.m_min = duration;
        if (duration > data.m_max)
            data.m_max = duration;
        data.m_count++;
        data.m_time += duration;
    }

    void didFinishPhase(HeapOperation collectionType, double duration)
    {
        TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
        updateData(data, duration);
        updateData(m_allCollectionData, duration);
    }

    TimeRecord m_allCollectionData;
    TimeRecord m_fullCollectionData;
    TimeRecord m_edenCollectionData;
    const char* m_name;
};
struct GCTimerScope {
    GCTimerScope(GCTimer* timer, HeapOperation collectionType)
        : m_timer(timer)
        , m_start(WTF::monotonicallyIncreasingTime())
        , m_collectionType(collectionType)
    {
    }

    ~GCTimerScope()
    {
        double delta = WTF::monotonicallyIncreasingTime() - m_start;
        m_timer->didFinishPhase(m_collectionType, delta);
    }

    GCTimer* m_timer;
    double m_start;
    HeapOperation m_collectionType;
};
struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }

    ~GCCounter()
    {
        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
    }

    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};
#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)

#endif // ENABLE(GC_LOGGING)
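
// Heap sizing policy: the minimum collection threshold is capped at a fraction of
// physical RAM, and after a full collection the heap may grow in proportion to its
// live size, with smaller heaps allowed to grow faster than larger ones.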
static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}

static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}
static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->currentThreadIsHoldingAPILock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}
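
// Helper functors for heap iteration: MarkObject sets the mark bit on cells that are
// not zapped, Count and CountIfGlobalObject back the object-counting queries further
// down, and RecordType builds a per-class histogram of cells.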
struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    void operator()(JSCell* cell) {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};
class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace
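
// Heap construction wires together the marked (object) space, the copied (backing
// store) space, the machine-thread conservative scanner, and the GC activity timers
// that schedule eden and full collections.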
Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_sizeAfterLastFullCollect(0)
    , m_sizeBeforeLastFullCollect(0)
    , m_sizeAfterLastEdenCollect(0)
    , m_sizeBeforeLastEdenCollect(0)
    , m_bytesAllocatedThisCycle(0)
    , m_bytesAbandonedSinceLastFullCollect(0)
    , m_maxEdenSize(m_minBytesPerCycle)
    , m_maxHeapSize(m_minBytesPerCycle)
    , m_shouldDoFullCollection(false)
    , m_totalBytesVisited(0)
    , m_totalBytesCopied(0)
    , m_operationInProgress(NoOperation)
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_extraMemoryUsage(0)
    , m_machineThreads(this)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_codeBlocks(m_blockAllocator)
    , m_isSafeToCollect(false)
    , m_writeBarrierBuffer(256)
    // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
    // schedule the timer if we've never done a collection.
    , m_lastFullGCLength(0.01)
    , m_lastEdenGCLength(0.01)
    , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
    , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
#if ENABLE(GGC)
    , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
#else
    , m_edenActivityCallback(m_fullActivityCallback)
#endif
    , m_sweeper(IncrementalSweeper::create(this))
{
    m_storageSpace.init();
}
bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->entryScope);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();
}
void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    collectIfNecessaryOrDefer();
}
void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.1 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}

void Heap::didAbandon(size_t bytes)
{
    if (m_fullActivityCallback) {
        m_fullActivityCallback->didAllocate(
            m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    }
    m_bytesAbandonedSinceLastFullCollect += bytes;
}
void Heap::protect(JSValue k)
{
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}
void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
{
    if (m_arrayBuffers.addReference(cell, buffer)) {
        collectIfNecessaryOrDefer();
        didAllocate(buffer->gcSizeEstimateInBytes());
    }
}
void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}
void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    GCPHASE(FinalizeUnconditionalFinalizers);
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::willStartIterating()
{
    m_objectSpace.willStartIterating();
}

void Heap::didFinishIterating()
{
    m_objectSpace.didFinishIterating();
}
void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        registerRoots[i]->setMarked();
        roots.add(registerRoots[i]);
    }
}
void Heap::markRoots(double gcStartTime)
{
    SamplingRegion samplingRegion("Garbage Collection: Marking");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(GGC)
    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
    m_slotVisitor.markStack().fillVector(rememberedSet);
#else
    Vector<const JSCell*> rememberedSet;
#endif

    if (m_operationInProgress == EdenCollection)
        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
    else
        m_codeBlocks.clearMarksForFullCollection();

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    void* dummy;
    ALLOCATE_AND_GET_REGISTER_STATE(registers);
    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
    gatherStackRoots(conservativeRoots, &dummy, registers);
    gatherJSStackRoots(conservativeRoots);
    gatherScratchBufferRoots(conservativeRoots);

    sanitizeStackForVM(m_vm);

    clearLivenessData();

    m_sharedData.didStartMarking();
    m_slotVisitor.didStartMarking();
    HeapRootVisitor heapRootVisitor(m_slotVisitor);

    {
        ParallelModeEnabler enabler(m_slotVisitor);

        visitExternalRememberedSet();
        visitSmallStrings();
        visitConservativeRoots(conservativeRoots);
        visitProtectedObjects(heapRootVisitor);
        visitTempSortVectors(heapRootVisitor);
        visitArgumentBuffers(heapRootVisitor);
        visitException(heapRootVisitor);
        visitStrongHandles(heapRootVisitor);
        visitHandleStack(heapRootVisitor);
        traceCodeBlocksAndJITStubRoutines();
        converge();
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    visitWeakHandles(heapRootVisitor);

    clearRememberedSet(rememberedSet);
    m_sharedData.didFinishMarking();
    updateObjectCounts(gcStartTime);
    resetVisitors();
}
void Heap::copyBackingStores()
{
    if (m_operationInProgress == EdenCollection)
        m_storageSpace.startedCopying<EdenCollection>();
    else {
        ASSERT(m_operationInProgress == FullCollection);
        m_storageSpace.startedCopying<FullCollection>();
    }

    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}
void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy, MachineThreads::RegisterState& registers)
{
    GCPHASE(GatherStackRoots);
    m_jitStubRoutines.clearMarks();
    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy, registers);
}

void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
    GCPHASE(GatherJSStackRoots);
    stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
}

void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
    GCPHASE(GatherScratchBufferRoots);
    m_vm->gatherConservativeRoots(roots);
}

void Heap::clearLivenessData()
{
    GCPHASE(ClearLivenessData);
    m_objectSpace.clearNewlyAllocated();
    m_objectSpace.clearMarks();
}
void Heap::visitExternalRememberedSet()
{
#if JSC_OBJC_API_ENABLED
    scanExternalRememberedSet(*m_vm, m_slotVisitor);
#endif
}

void Heap::visitSmallStrings()
{
    GCPHASE(VisitSmallStrings);
    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Small strings:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitConservativeRoots(ConservativeRoots& roots)
{
    GCPHASE(VisitConservativeRoots);
    m_slotVisitor.append(roots);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Conservative Roots:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitCompilerWorklistWeakReferences()
{
#if ENABLE(DFG_JIT)
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("DFG Worklists:\n", m_slotVisitor);
#endif
}

void Heap::removeDeadCompilerWorklistEntries()
{
#if ENABLE(DFG_JIT)
    GCPHASE(FinalizeDFGWorklists);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->removeDeadPlans(*m_vm);
#endif
}
void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitProtectedObjects);

    for (auto& pair : m_protectedValues)
        heapRootVisitor.visit(&pair.key);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Protected Objects:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitTempSortVectors);
    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>*> VectorOfValueStringVectors;

    for (auto* vector : m_tempSortingVectors) {
        for (auto& valueStringPair : *vector) {
            if (valueStringPair.first)
                heapRootVisitor.visit(&valueStringPair.first);
        }
    }

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Temp Sort Vectors:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingArgumentBuffers);
    if (!m_markListSet || !m_markListSet->size())
        return;

    MarkedArgumentBuffer::markLists(visitor, *m_markListSet);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Argument Buffers:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitException(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingException);
    if (!m_vm->exception())
        return;

    visitor.visit(m_vm->addressOfException());

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Exceptions:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::visitStrongHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitStrongHandles);
    m_handleSet.visitStrongHandles(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Strong Handles:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitHandleStack(HeapRootVisitor& visitor)
{
    GCPHASE(VisitHandleStack);
    m_handleStack.visit(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Handle Stack:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::traceCodeBlocksAndJITStubRoutines()
{
    GCPHASE(TraceCodeBlocksAndJITStubRoutines);
    m_codeBlocks.traceMarked(m_slotVisitor);
    m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
void Heap::converge()
{
#if ENABLE(PARALLEL_GC)
    GCPHASE(Convergence);
    m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
}
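
// Visiting weak sets can discover newly live objects (weak-reference harvesters and
// newly "executing" code blocks push more work), so we keep draining until the slot
// visitor's mark stack stays empty.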
void Heap::visitWeakHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitingLiveWeakHandles);
    while (true) {
        m_objectSpace.visitWeakSets(visitor);
        harvestWeakReferences();
        visitCompilerWorklistWeakReferences();
        m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
        if (m_slotVisitor.isEmpty())
            return;

        if (Options::logGC() == GCLogging::Verbose)
            dataLog("Live Weak Handles:\n", m_slotVisitor);

        {
            ParallelModeEnabler enabler(m_slotVisitor);
            m_slotVisitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
            m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
        }
    }
}
void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
{
#if ENABLE(GGC)
    GCPHASE(ClearRememberedSet);
    for (auto* cell : rememberedSet) {
        MarkedBlock::blockFor(cell)->clearRemembered(cell);
        const_cast<JSCell*>(cell)->setRemembered(false);
    }
#else
    UNUSED_PARAM(rememberedSet);
#endif
}
void Heap::updateObjectCounts(double gcStartTime)
{
    GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());

    if (Options::logGC() == GCLogging::Verbose) {
        size_t visitCount = m_slotVisitor.visitCount();
#if ENABLE(PARALLEL_GC)
        visitCount += m_sharedData.childVisitCount();
#endif
        dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
    }

    if (m_operationInProgress == EdenCollection) {
        m_totalBytesVisited += m_slotVisitor.bytesVisited();
        m_totalBytesCopied += m_slotVisitor.bytesCopied();
    } else {
        ASSERT(m_operationInProgress == FullCollection);
        m_totalBytesVisited = m_slotVisitor.bytesVisited();
        m_totalBytesCopied = m_slotVisitor.bytesCopied();
    }
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif
}
void Heap::resetVisitors()
{
    m_slotVisitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::extraSize()
{
    return m_extraMemoryUsage + m_arrayBuffers.size();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size() + extraSize();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
}

size_t Heap::sizeAfterCollect()
{
    // The result here may not agree with the normal Heap::size().
    // This is due to the fact that we only count live copied bytes
    // rather than all used (including dead) copied bytes, thus it's
    // always the case that m_totalBytesCopied <= m_storageSpace.size().
    ASSERT(m_totalBytesCopied <= m_storageSpace.size());
    return m_totalBytesVisited + m_totalBytesCopied + extraSize();
}
size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
}
void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->entryScope)
        return;

    // If we have things on any worklist, then don't delete code. This is kind of
    // a weird heuristic. It's definitely not safe to throw away code that is on
    // the worklist. But this change was made in a hurry so we just avoid throwing
    // away any code if there is any code on any worklist. I suspect that this
    // might not actually be too dumb: if there is code on worklists then that
    // means that we are running some hot JS code right now. Maybe causing
    // recompilations isn't a good idea.
#if ENABLE(DFG_JIT)
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            if (worklist->isActiveForVM(*vm()))
                return;
        }
    }
#endif // ENABLE(DFG_JIT)

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
    m_codeBlocks.clearMarksForFullCollection();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
}
void Heap::deleteAllUnlinkedFunctionCode()
{
    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
    }
}
void Heap::clearUnmarkedExecutables()
{
    GCPHASE(ClearUnmarkedExecutables);
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }
}
void Heap::deleteUnmarkedCompiledCode()
{
    GCPHASE(DeleteCodeBlocks);
    clearUnmarkedExecutables();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}
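
// Generational write barrier support: addToRememberedSet() records an old object that
// has been stored into, setting both the block-level and cell-level remembered bits and
// queueing the cell so the next eden collection re-visits it.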
void Heap::addToRememberedSet(const JSCell* cell)
{
    ASSERT(cell);
    ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
    if (isRemembered(cell))
        return;
    MarkedBlock::blockFor(cell)->setRemembered(cell);
    const_cast<JSCell*>(cell)->setRemembered(true);
    m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
}
void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(FullCollection);

    SamplingRegion samplingRegion("Garbage Collection: Sweeping");
    DelayedReleaseScope delayedReleaseScope(m_objectSpace);
    m_objectSpace.sweep();
    m_objectSpace.shrink();
}
static double minute = 60.0;
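
// A collection runs as a fixed sequence of phases: suspend the DFG compiler threads,
// drop old code if it has been idle long enough, mark roots, sweep array buffers,
// snapshot marked space, copy backing stores, finalize and delete unmarked code, then
// update the allocation limits that schedule the next collection.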
void Heap::collect(HeapOperation collectionType)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif

    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC: ");
        before = currentTimeMS();
    }

    SamplingRegion samplingRegion("Garbage Collection");

    RELEASE_ASSERT(!m_deferralDepth);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    suspendCompilerThreads();
    willStartCollection(collectionType);

    double gcStartTime = WTF::monotonicallyIncreasingTime();

    deleteOldCode(gcStartTime);
    flushOldStructureIDTables();
    stopAllocation();
    flushWriteBarrierBuffer();

    markRoots(gcStartTime);

    JAVASCRIPTCORE_GC_MARKED();

    reapWeakHandles();
    sweepArrayBuffers();
    snapshotMarkedSpace();

    copyBackingStores();

    finalizeUnconditionalFinalizers();
    removeDeadCompilerWorklistEntries();
    deleteUnmarkedCompiledCode();
    deleteSourceProviderCaches();
    notifyIncrementalSweeper();
    rememberCurrentlyExecutingCodeBlocks();

    resetAllocators();
    updateAllocationLimits();
    didFinishCollection(gcStartTime);
    resumeCompilerThreads();

    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms]\n");
    }
}
void Heap::suspendCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(SuspendCompilerThreads);
    ASSERT(m_suspendedCompilerWorklists.isEmpty());
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            m_suspendedCompilerWorklists.append(worklist);
            worklist->suspendAllThreads();
        }
    }
#endif
}
void Heap::willStartCollection(HeapOperation collectionType)
{
    GCPHASE(StartingCollection);
    if (shouldDoFullCollection(collectionType)) {
        m_operationInProgress = FullCollection;
        m_slotVisitor.clearMarkStack();
        m_shouldDoFullCollection = false;
        if (Options::logGC())
            dataLog("FullCollection, ");
    } else {
        m_operationInProgress = EdenCollection;
        if (Options::logGC())
            dataLog("EdenCollection, ");
    }

    if (m_operationInProgress == FullCollection) {
        m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
        m_extraMemoryUsage = 0;

        if (m_fullActivityCallback)
            m_fullActivityCallback->willCollect();
    } else {
        ASSERT(m_operationInProgress == EdenCollection);
        m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
    }

    if (m_edenActivityCallback)
        m_edenActivityCallback->willCollect();
}
void Heap::deleteOldCode(double gcStartTime)
{
    if (m_operationInProgress == EdenCollection)
        return;

    GCPHASE(DeleteOldCode);
    if (gcStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }
}
void Heap::flushOldStructureIDTables()
{
    GCPHASE(FlushOldStructureIDTables);
    m_structureIDTable.flushOldTables();
}

void Heap::flushWriteBarrierBuffer()
{
    GCPHASE(FlushWriteBarrierBuffer);
    if (m_operationInProgress == EdenCollection) {
        m_writeBarrierBuffer.flush(*this);
        return;
    }
    m_writeBarrierBuffer.reset();
}
void Heap::stopAllocation()
{
    GCPHASE(StopAllocation);
    m_objectSpace.stopAllocating();
    if (m_operationInProgress == FullCollection)
        m_storageSpace.didStartFullCollection();
}

void Heap::reapWeakHandles()
{
    GCPHASE(ReapingWeakHandles);
    m_objectSpace.reapWeakSets();
}

void Heap::sweepArrayBuffers()
{
    GCPHASE(SweepingArrayBuffers);
    m_arrayBuffers.sweep();
}
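
// The incremental sweeper works from a snapshot of the block set taken during a full
// collection; MarkedBlockSnapshotFunctor copies each block pointer into that snapshot.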
struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
    MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
        : m_index(0)
        , m_blocks(blocks)
    {
    }

    void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }

    size_t m_index;
    Vector<MarkedBlock*>& m_blocks;
};

void Heap::snapshotMarkedSpace()
{
    GCPHASE(SnapshotMarkedSpace);
    if (m_operationInProgress != FullCollection)
        return;

    m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
    MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
    m_objectSpace.forEachBlock(functor);
}
void Heap::deleteSourceProviderCaches()
{
    GCPHASE(DeleteSourceProviderCaches);
    m_vm->clearSourceProviderCaches();
}

void Heap::notifyIncrementalSweeper()
{
    GCPHASE(NotifyIncrementalSweeper);
    if (m_operationInProgress != FullCollection)
        return;

    m_sweeper->startSweeping(m_blockSnapshot);
}

void Heap::rememberCurrentlyExecutingCodeBlocks()
{
    GCPHASE(RememberCurrentlyExecutingCodeBlocks);
    m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
}

void Heap::resetAllocators()
{
    GCPHASE(ResetAllocators);
    m_objectSpace.resetAllocators();
}
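
// After a full collection the heap limit is recomputed from the live size; after an
// eden collection the limit only grows by the amount that survived, and a full
// collection is requested once eden's share of the heap drops below one third.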
void Heap::updateAllocationLimits()
{
    GCPHASE(UpdateAllocationLimits);
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    if (m_operationInProgress == FullCollection) {
        // To avoid pathological GC churn in very small and very large heaps, we set
        // the new allocation limit based on the current size of the heap, with a
        // fixed minimum.
        m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastFullCollect = currentHeapSize;
        m_bytesAbandonedSinceLastFullCollect = 0;
    } else {
        ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastEdenCollect = currentHeapSize;
        double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
        double minEdenToOldGenerationRatio = 1.0 / 3.0;
        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
            m_shouldDoFullCollection = true;
        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (m_fullActivityCallback) {
            ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
            m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
        }
    }

    m_sizeAfterLastCollect = currentHeapSize;
    m_bytesAllocatedThisCycle = 0;

    if (Options::logGC())
        dataLog(currentHeapSize / 1024, " kb, ");
}
void Heap::didFinishCollection(double gcStartTime)
{
    GCPHASE(FinishingCollection);
    double gcEndTime = WTF::monotonicallyIncreasingTime();
    if (m_operationInProgress == FullCollection)
        m_lastFullGCLength = gcEndTime - gcStartTime;
    else
        m_lastEdenGCLength = gcEndTime - gcStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);

    if (Options::logGC() == GCLogging::Verbose)
        GCLogging::dumpObjectGraph(this);
}
void Heap::resumeCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(ResumeCompilerThreads);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->resumeAllThreads();
    m_suspendedCompilerWorklists.clear();
#endif
}
void Heap::markDeadObjects()
{
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
}
void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
{
    m_fullActivityCallback = activityCallback;
}

void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
{
    m_edenActivityCallback = activityCallback;
}

GCActivityCallback* Heap::fullActivityCallback()
{
    return m_fullActivityCallback.get();
}

GCActivityCallback* Heap::edenActivityCallback()
{
    return m_edenActivityCallback.get();
}

void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
{
    m_sweeper = sweeper;
}

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}
void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
    if (m_fullActivityCallback)
        m_fullActivityCallback->setEnabled(enable);
    if (m_edenActivityCallback)
        m_edenActivityCallback->setEnabled(enable);
}

void Heap::didAllocate(size_t bytes)
{
    if (m_edenActivityCallback)
        m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    m_bytesAllocatedThisCycle += bytes;
}
bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}
void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}
class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = zombifiedBits;
    }
};
void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        DelayedReleaseScope delayedReleaseScope(m_objectSpace);
        m_objectSpace.zombifySweep();
    }
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
}
void Heap::flushWriteBarrierBuffer(JSCell* cell)
{
    m_writeBarrierBuffer.flush(*this);
    m_writeBarrierBuffer.add(cell);
}
bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
{
#if ENABLE(GGC)
    if (Options::alwaysDoFullCollection())
        return true;

    switch (requestedCollectionType) {
    case EdenCollection:
        return false;
    case FullCollection:
        return true;
    case AnyCollection:
        return m_shouldDoFullCollection;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return false;
#else
    UNUSED_PARAM(requestedCollectionType);
    return true;
#endif
}