/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
24 #include "CodeBlock.h"
25 #include "ConservativeRoots.h"
26 #include "CopiedSpace.h"
27 #include "CopiedSpaceInlines.h"
28 #include "CopyVisitorInlines.h"
29 #include "DFGWorklist.h"
30 #include "DelayedReleaseScope.h"
31 #include "EdenGCActivityCallback.h"
32 #include "FullGCActivityCallback.h"
33 #include "GCActivityCallback.h"
34 #include "GCIncomingRefCountedSetInlines.h"
35 #include "HeapIterationScope.h"
36 #include "HeapRootVisitor.h"
37 #include "HeapStatistics.h"
38 #include "IncrementalSweeper.h"
39 #include "Interpreter.h"
40 #include "JSGlobalObject.h"
42 #include "JSONObject.h"
43 #include "JSCInlines.h"
44 #include "JSVirtualMachineInternal.h"
45 #include "RecursiveAllocationScope.h"
47 #include "UnlinkedCodeBlock.h"
49 #include "WeakSetInlines.h"
51 #include <wtf/RAMSize.h>
52 #include <wtf/CurrentTime.h>
53 #include <wtf/ProcessID.h>

using namespace std;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
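
// These two constants seed m_minBytesPerCycle via minHeapSize() below: a LargeHeap VM
// starts each cycle with up to 32 MB (capped at 1/4 of RAM), a SmallHeap VM with 1 MB.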

#define ENABLE_GC_LOGGING 0

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

struct GCTimer {
    GCTimer(const char* name)
        : m_name(name)
    {
    }

    ~GCTimer()
    {
        logData(m_allCollectionData, "(All)");
        logData(m_edenCollectionData, "(Eden)");
        logData(m_fullCollectionData, "(Full)");
    }

    struct TimeRecord {
        TimeRecord()
            : m_time(0)
            , m_min(std::numeric_limits<double>::infinity())
            , m_max(0)
            , m_count(0)
        {
        }

        double m_time;
        double m_min;
        double m_max;
        size_t m_count;
    };

    void logData(const TimeRecord& data, const char* extra)
    {
        dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
            getCurrentProcessID(),
            m_name, extra,
            data.m_time * 1000,
            data.m_time * 1000 / data.m_count,
            data.m_min * 1000,
            data.m_max * 1000,
            data.m_count);
    }

    void updateData(TimeRecord& data, double duration)
    {
        if (duration < data.m_min)
            data.m_min = duration;
        if (duration > data.m_max)
            data.m_max = duration;
        data.m_count++;
        data.m_time += duration;
    }

    void didFinishPhase(HeapOperation collectionType, double duration)
    {
        TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
        updateData(data, duration);
        updateData(m_allCollectionData, duration);
    }

    TimeRecord m_allCollectionData;
    TimeRecord m_fullCollectionData;
    TimeRecord m_edenCollectionData;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer, HeapOperation collectionType)
        : m_timer(timer)
        , m_start(WTF::monotonicallyIncreasingTime())
        , m_collectionType(collectionType)
    {
    }

    ~GCTimerScope()
    {
        double delta = WTF::monotonicallyIncreasingTime() - m_start;
        m_timer->didFinishPhase(m_collectionType, delta);
    }

    GCTimer* m_timer;
    double m_start;
    HeapOperation m_collectionType;
};

struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }

    ~GCCounter()
    {
        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
    }

    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif // ENABLE(GC_LOGGING)
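
// With GC_LOGGING enabled, GCPHASE(Foo) defines a file-static GCTimer named FooTimer and
// times the enclosing scope, attributing the sample to the current collection type
// (m_operationInProgress); see e.g. GCPHASE(GatherStackRoots) below. GCCOUNTER(Foo, n)
// accumulates n into a FooCounter that logs its totals on destruction.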

static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}

static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}
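
// For example, with ramSize = 1 GB: a 300 MB heap (under half of RAM) may grow to 450 MB
// before the next full collection, while a 600 MB heap may only grow to 750 MB.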

static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->currentThreadIsHoldingAPILock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}

struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    void operator()(JSCell* cell) {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace

Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_sizeAfterLastFullCollect(0)
    , m_sizeBeforeLastFullCollect(0)
    , m_sizeAfterLastEdenCollect(0)
    , m_sizeBeforeLastEdenCollect(0)
    , m_bytesAllocatedThisCycle(0)
    , m_bytesAbandonedSinceLastFullCollect(0)
    , m_maxEdenSize(m_minBytesPerCycle)
    , m_maxHeapSize(m_minBytesPerCycle)
    , m_shouldDoFullCollection(false)
    , m_totalBytesVisited(0)
    , m_totalBytesCopied(0)
    , m_operationInProgress(NoOperation)
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_extraMemoryUsage(0)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_handleSet(vm)
    , m_codeBlocks(m_blockAllocator)
    , m_isSafeToCollect(false)
    , m_writeBarrierBuffer(256)
    , m_vm(vm)
    // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
    // schedule the timer if we've never done a collection.
    , m_lastFullGCLength(0.01)
    , m_lastEdenGCLength(0.01)
    , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
    , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
#if ENABLE(GGC)
    , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
#else
    , m_edenActivityCallback(m_fullActivityCallback)
#endif
    , m_sweeper(IncrementalSweeper::create(this))
    , m_deferralDepth(0)
{
    m_storageSpace.init();
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->entryScope);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    collectIfNecessaryOrDefer();
}

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.1 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}
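
// E.g. if the last collection left 100 MB live, an abandoned object graph is charged as
// 10 MB of pretend allocation, moving the next collection trigger that much closer.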

void Heap::didAbandon(size_t bytes)
{
    if (m_fullActivityCallback) {
        m_fullActivityCallback->didAllocate(
            m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    }
    m_bytesAbandonedSinceLastFullCollect += bytes;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
{
    if (m_arrayBuffers.addReference(cell, buffer)) {
        collectIfNecessaryOrDefer();
        didAllocate(buffer->gcSizeEstimateInBytes());
    }
}

void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    GCPHASE(FinalizeUnconditionalFinalizers);
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::willStartIterating()
{
    m_objectSpace.willStartIterating();
}

void Heap::didFinishIterating()
{
    m_objectSpace.didFinishIterating();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        registerRoots[i]->setMarked();
        roots.add(registerRoots[i]);
    }
}

void Heap::markRoots(double gcStartTime)
{
    SamplingRegion samplingRegion("Garbage Collection: Marking");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(GGC)
    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
    m_slotVisitor.markStack().fillVector(rememberedSet);
#else
    Vector<const JSCell*> rememberedSet;
#endif

    if (m_operationInProgress == EdenCollection)
        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
    else
        m_codeBlocks.clearMarksForFullCollection();

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    void* dummy;
    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
    gatherStackRoots(conservativeRoots, &dummy);
    gatherJSStackRoots(conservativeRoots);
    gatherScratchBufferRoots(conservativeRoots);

    sanitizeStackForVM(m_vm);

    clearLivenessData();

    m_sharedData.didStartMarking();
    m_slotVisitor.didStartMarking();
    HeapRootVisitor heapRootVisitor(m_slotVisitor);

    {
        ParallelModeEnabler enabler(m_slotVisitor);

        visitExternalRememberedSet();
        visitSmallStrings();
        visitConservativeRoots(conservativeRoots);
        visitProtectedObjects(heapRootVisitor);
        visitTempSortVectors(heapRootVisitor);
        visitArgumentBuffers(heapRootVisitor);
        visitException(heapRootVisitor);
        visitStrongHandles(heapRootVisitor);
        visitHandleStack(heapRootVisitor);
        traceCodeBlocksAndJITStubRoutines();
        converge();
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    visitWeakHandles(heapRootVisitor);

    clearRememberedSet(rememberedSet);
    m_sharedData.didFinishMarking();
    updateObjectCounts(gcStartTime);
    resetVisitors();
}
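
// Note the asymmetry above: an eden collection seeds marking from the remembered set that
// the write barrier left on the mark stack and only resets code-block marks for those
// cells, while a full collection clears every mark and rebuilds liveness from the roots.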

void Heap::copyBackingStores()
{
    if (m_operationInProgress == EdenCollection)
        m_storageSpace.startedCopying<EdenCollection>();
    else {
        ASSERT(m_operationInProgress == FullCollection);
        m_storageSpace.startedCopying<FullCollection>();
    }

    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}
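
// The copy phase only runs when CopiedSpace decides compaction is worthwhile
// (shouldDoCopyPhase()); otherwise the space is simply told that copying is done so its
// block bookkeeping stays consistent.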

void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy)
{
    GCPHASE(GatherStackRoots);
    m_jitStubRoutines.clearMarks();
    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy);
}

void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
    GCPHASE(GatherJSStackRoots);
    stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
}

void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
    GCPHASE(GatherScratchBufferRoots);
    m_vm->gatherConservativeRoots(roots);
}

void Heap::clearLivenessData()
{
    GCPHASE(ClearLivenessData);
    m_objectSpace.clearNewlyAllocated();
    m_objectSpace.clearMarks();
}

void Heap::visitExternalRememberedSet()
{
#if JSC_OBJC_API_ENABLED
    scanExternalRememberedSet(*m_vm, m_slotVisitor);
#endif
}

void Heap::visitSmallStrings()
{
    GCPHASE(VisitSmallStrings);
    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Small strings:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitConservativeRoots(ConservativeRoots& roots)
{
    GCPHASE(VisitConservativeRoots);
    m_slotVisitor.append(roots);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Conservative Roots:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitCompilerWorklistWeakReferences()
{
#if ENABLE(DFG_JIT)
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("DFG Worklists:\n", m_slotVisitor);
#endif
}

void Heap::removeDeadCompilerWorklistEntries()
{
#if ENABLE(DFG_JIT)
    GCPHASE(FinalizeDFGWorklists);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->removeDeadPlans(*m_vm);
#endif
}

void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitProtectedObjects);

    for (auto& pair : m_protectedValues)
        heapRootVisitor.visit(&pair.key);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Protected Objects:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitTempSortVectors);
    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>*> VectorOfValueStringVectors;

    for (auto* vector : m_tempSortingVectors) {
        for (auto& valueStringPair : *vector) {
            if (valueStringPair.first)
                heapRootVisitor.visit(&valueStringPair.first);
        }
    }

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Temp Sort Vectors:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingArgumentBuffers);
    if (!m_markListSet || !m_markListSet->size())
        return;

    MarkedArgumentBuffer::markLists(visitor, *m_markListSet);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Argument Buffers:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitException(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingException);
    if (!m_vm->exception())
        return;

    visitor.visit(m_vm->addressOfException());

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Exceptions:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitStrongHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitStrongHandles);
    m_handleSet.visitStrongHandles(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Strong Handles:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitHandleStack(HeapRootVisitor& visitor)
{
    GCPHASE(VisitHandleStack);
    m_handleStack.visit(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Handle Stack:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::traceCodeBlocksAndJITStubRoutines()
{
    GCPHASE(TraceCodeBlocksAndJITStubRoutines);
    m_codeBlocks.traceMarked(m_slotVisitor);
    m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::converge()
{
#if ENABLE(PARALLEL_GC)
    GCPHASE(Convergence);
    m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
}

void Heap::visitWeakHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitingLiveWeakHandles);
    while (true) {
        m_objectSpace.visitWeakSets(visitor);
        harvestWeakReferences();
        visitCompilerWorklistWeakReferences();
        m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
        if (m_slotVisitor.isEmpty())
            break;

        if (Options::logGC() == GCLogging::Verbose)
            dataLog("Live Weak Handles:\n", m_slotVisitor);

        {
            ParallelModeEnabler enabler(m_slotVisitor);
            m_slotVisitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
            m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
        }
    }
}

void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
{
#if ENABLE(GGC)
    GCPHASE(ClearRememberedSet);
    for (auto* cell : rememberedSet) {
        MarkedBlock::blockFor(cell)->clearRemembered(cell);
        const_cast<JSCell*>(cell)->setRemembered(false);
    }
#else
    UNUSED_PARAM(rememberedSet);
#endif
}

void Heap::updateObjectCounts(double gcStartTime)
{
    GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());

    if (Options::logGC() == GCLogging::Verbose) {
        size_t visitCount = m_slotVisitor.visitCount();
#if ENABLE(PARALLEL_GC)
        visitCount += m_sharedData.childVisitCount();
#endif
        dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
    }

    if (m_operationInProgress == EdenCollection) {
        m_totalBytesVisited += m_slotVisitor.bytesVisited();
        m_totalBytesCopied += m_slotVisitor.bytesCopied();
    } else {
        ASSERT(m_operationInProgress == FullCollection);
        m_totalBytesVisited = m_slotVisitor.bytesVisited();
        m_totalBytesCopied = m_slotVisitor.bytesCopied();
    }
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif
}

void Heap::resetVisitors()
{
    m_slotVisitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}

size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::extraSize()
{
    return m_extraMemoryUsage + m_arrayBuffers.size();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size() + extraSize();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
}

size_t Heap::sizeAfterCollect()
{
    // The result here may not agree with the normal Heap::size().
    // This is due to the fact that we only count live copied bytes
    // rather than all used (including dead) copied bytes, thus it's
    // always the case that m_totalBytesCopied <= m_storageSpace.size().
    ASSERT(m_totalBytesCopied <= m_storageSpace.size());
    return m_totalBytesVisited + m_totalBytesCopied + extraSize();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
}

void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->entryScope)
        return;

    // If we have things on any worklist, then don't delete code. This is kind of
    // a weird heuristic. It's definitely not safe to throw away code that is on
    // the worklist. But this change was made in a hurry so we just avoid throwing
    // away any code if there is any code on any worklist. I suspect that this
    // might not actually be too dumb: if there is code on worklists then that
    // means that we are running some hot JS code right now. Maybe causing
    // recompilations isn't a good idea.
#if ENABLE(DFG_JIT)
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            if (worklist->isActiveForVM(*vm()))
                return;
        }
    }
#endif // ENABLE(DFG_JIT)

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
    m_codeBlocks.clearMarksForFullCollection();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
}

void Heap::deleteAllUnlinkedFunctionCode()
{
    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
    }
}

void Heap::clearUnmarkedExecutables()
{
    GCPHASE(ClearUnmarkedExecutables);
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }
}

void Heap::deleteUnmarkedCompiledCode()
{
    GCPHASE(DeleteCodeBlocks);
    clearUnmarkedExecutables();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}

void Heap::addToRememberedSet(const JSCell* cell)
{
    ASSERT(cell);
    ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
    if (isRemembered(cell))
        return;
    MarkedBlock::blockFor(cell)->setRemembered(cell);
    const_cast<JSCell*>(cell)->setRemembered(true);
    m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
}
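
// This is the slow path of the generational write barrier: a cell that survived a prior
// collection and has just had a new pointer stored into it is flagged as remembered and
// pushed onto the visitor's mark stack, so the next eden collection re-scans it (the
// remembered set captured at the top of markRoots()).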

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(FullCollection);

    SamplingRegion samplingRegion("Garbage Collection: Sweeping");
    DelayedReleaseScope delayedReleaseScope(m_objectSpace);
    m_objectSpace.sweep();
    m_objectSpace.shrink();
}

static double minute = 60.0;

void Heap::collect(HeapOperation collectionType)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif

    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC: ");
        before = currentTimeMS();
    }

    SamplingRegion samplingRegion("Garbage Collection");

    RELEASE_ASSERT(!m_deferralDepth);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    suspendCompilerThreads();
    willStartCollection(collectionType);
    GCPHASE(Collect);

    double gcStartTime = WTF::monotonicallyIncreasingTime();

    deleteOldCode(gcStartTime);
    flushOldStructureIDTables();
    stopAllocation();
    flushWriteBarrierBuffer();

    markRoots(gcStartTime);

    JAVASCRIPTCORE_GC_MARKED();

    reapWeakHandles();
    sweepArrayBuffers();
    snapshotMarkedSpace();

    copyBackingStores();

    finalizeUnconditionalFinalizers();
    removeDeadCompilerWorklistEntries();
    deleteUnmarkedCompiledCode();
    deleteSourceProviderCaches();
    notifyIncrementalSweeper();
    rememberCurrentlyExecutingCodeBlocks();

    resetAllocators();
    updateAllocationLimits();
    didFinishCollection(gcStartTime);
    resumeCompilerThreads();

    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms]\n");
    }
}

void Heap::suspendCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(SuspendCompilerThreads);
    ASSERT(m_suspendedCompilerWorklists.isEmpty());
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            m_suspendedCompilerWorklists.append(worklist);
            worklist->suspendAllThreads();
        }
    }
#endif
}

void Heap::willStartCollection(HeapOperation collectionType)
{
    GCPHASE(StartingCollection);
    if (shouldDoFullCollection(collectionType)) {
        m_operationInProgress = FullCollection;
        m_slotVisitor.clearMarkStack();
        m_shouldDoFullCollection = false;
        if (Options::logGC())
            dataLog("FullCollection, ");
    } else {
        m_operationInProgress = EdenCollection;
        if (Options::logGC())
            dataLog("EdenCollection, ");
    }
    if (m_operationInProgress == FullCollection) {
        m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
        m_extraMemoryUsage = 0;

        if (m_fullActivityCallback)
            m_fullActivityCallback->willCollect();
    } else {
        ASSERT(m_operationInProgress == EdenCollection);
        m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
    }

    if (m_edenActivityCallback)
        m_edenActivityCallback->willCollect();
}

void Heap::deleteOldCode(double gcStartTime)
{
    if (m_operationInProgress == EdenCollection)
        return;

    GCPHASE(DeleteOldCode);
    if (gcStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }
}

void Heap::flushOldStructureIDTables()
{
    GCPHASE(FlushOldStructureIDTables);
    m_structureIDTable.flushOldTables();
}

void Heap::flushWriteBarrierBuffer()
{
    GCPHASE(FlushWriteBarrierBuffer);
    if (m_operationInProgress == EdenCollection) {
        m_writeBarrierBuffer.flush(*this);
        return;
    }
    m_writeBarrierBuffer.reset();
}

void Heap::stopAllocation()
{
    GCPHASE(StopAllocation);
    m_objectSpace.stopAllocating();
    if (m_operationInProgress == FullCollection)
        m_storageSpace.didStartFullCollection();
}

void Heap::reapWeakHandles()
{
    GCPHASE(ReapingWeakHandles);
    m_objectSpace.reapWeakSets();
}

void Heap::sweepArrayBuffers()
{
    GCPHASE(SweepingArrayBuffers);
    m_arrayBuffers.sweep();
}

struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
    MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
        : m_index(0)
        , m_blocks(blocks)
    {
    }

    void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }

    size_t m_index;
    Vector<MarkedBlock*>& m_blocks;
};

void Heap::snapshotMarkedSpace()
{
    GCPHASE(SnapshotMarkedSpace);
    if (m_operationInProgress != FullCollection)
        return;

    m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
    MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
    m_objectSpace.forEachBlock(functor);
}

void Heap::deleteSourceProviderCaches()
{
    GCPHASE(DeleteSourceProviderCaches);
    m_vm->clearSourceProviderCaches();
}

void Heap::notifyIncrementalSweeper()
{
    GCPHASE(NotifyIncrementalSweeper);
    if (m_operationInProgress != FullCollection)
        return;

    m_sweeper->startSweeping(m_blockSnapshot);
}

void Heap::rememberCurrentlyExecutingCodeBlocks()
{
    GCPHASE(RememberCurrentlyExecutingCodeBlocks);
    m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
}

void Heap::resetAllocators()
{
    GCPHASE(ResetAllocators);
    m_objectSpace.resetAllocators();
}

void Heap::updateAllocationLimits()
{
    GCPHASE(UpdateAllocationLimits);
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    if (m_operationInProgress == FullCollection) {
        // To avoid pathological GC churn in very small and very large heaps, we set
        // the new allocation limit based on the current size of the heap, with a
        // fixed minimum.
        m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastFullCollect = currentHeapSize;
        m_bytesAbandonedSinceLastFullCollect = 0;
    } else {
        ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastEdenCollect = currentHeapSize;
        double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
        double minEdenToOldGenerationRatio = 1.0 / 3.0;
        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
            m_shouldDoFullCollection = true;
        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (m_fullActivityCallback) {
            ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
            m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
        }
    }

    m_sizeAfterLastCollect = currentHeapSize;
    m_bytesAllocatedThisCycle = 0;

    if (Options::logGC())
        dataLog(currentHeapSize / 1024, " kb, ");
}
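
// Example: with 1 GB of RAM, a full collection that leaves 300 MB live sets m_maxHeapSize
// to proportionalHeapSize(300 MB) = 450 MB, so the next eden budget (m_maxEdenSize) is
// 150 MB. An eden collection keeps m_maxHeapSize but grows it by any net heap growth; once
// the eden budget would drop below one third of the heap, m_shouldDoFullCollection forces
// the next collection to be a full one.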

void Heap::didFinishCollection(double gcStartTime)
{
    GCPHASE(FinishingCollection);
    double gcEndTime = WTF::monotonicallyIncreasingTime();
    if (m_operationInProgress == FullCollection)
        m_lastFullGCLength = gcEndTime - gcStartTime;
    else
        m_lastEdenGCLength = gcEndTime - gcStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);

    if (Options::logGC() == GCLogging::Verbose)
        GCLogging::dumpObjectGraph(this);
}

void Heap::resumeCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(ResumeCompilerThreads);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->resumeAllThreads();
    m_suspendedCompilerWorklists.clear();
#endif
}

void Heap::markDeadObjects()
{
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
}

void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
{
    m_fullActivityCallback = activityCallback;
}

void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
{
    m_edenActivityCallback = activityCallback;
}

GCActivityCallback* Heap::fullActivityCallback()
{
    return m_fullActivityCallback.get();
}

GCActivityCallback* Heap::edenActivityCallback()
{
    return m_edenActivityCallback.get();
}

void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
{
    m_sweeper = sweeper;
}

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}

void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
    if (m_fullActivityCallback)
        m_fullActivityCallback->setEnabled(enable);
    if (m_edenActivityCallback)
        m_edenActivityCallback->setEnabled(enable);
}

void Heap::didAllocate(size_t bytes)
{
    if (m_edenActivityCallback)
        m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    m_bytesAllocatedThisCycle += bytes;
}

bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}

void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}

class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = zombifiedBits;
    }
};

void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        DelayedReleaseScope delayedReleaseScope(m_objectSpace);
        m_objectSpace.zombifySweep();
    }
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
}

void Heap::flushWriteBarrierBuffer(JSCell* cell)
{
    m_writeBarrierBuffer.flush(*this);
    m_writeBarrierBuffer.add(cell);
}

bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
{
#if ENABLE(GGC)
    if (Options::alwaysDoFullCollection())
        return true;

    switch (requestedCollectionType) {
    case EdenCollection:
        return false;
    case FullCollection:
        return true;
    case AnyCollection:
        return m_shouldDoFullCollection;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return false;
#else
    UNUSED_PARAM(requestedCollectionType);
    return true;
#endif
}