/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#include "Heap.h"

#include "CopiedSpace.h"
#include "CopiedSpaceInlineMethods.h"
#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "Interpreter.h"
#include "JSGlobalData.h"
#include "JSGlobalObject.h"
#include "JSONObject.h"
#include "WeakSetInlines.h"
#include <wtf/CurrentTime.h>

using namespace std;

namespace JSC {

namespace {

#if CPU(X86) || CPU(X86_64)
static const size_t largeHeapSize = 16 * 1024 * 1024;
#else
static const size_t largeHeapSize = 8 * 1024 * 1024;
#endif
static const size_t smallHeapSize = 512 * 1024;

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

struct GCTimer {
    GCTimer(const char* name)
        : m_time(0)
        , m_min(100000000)
        , m_max(0)
        , m_count(0)
        , m_name(name)
    {
    }
    ~GCTimer()
    {
        // Logging globals are destroyed at exit, so this prints one summary per phase.
        dataLog("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min * 1000, m_max * 1000);
    }

    double m_time;
    double m_min;
    double m_max;
    size_t m_count;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer)
        : m_timer(timer)
        , m_start(WTF::currentTime())
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::currentTime() - m_start;
        if (delta < m_timer->m_min)
            m_timer->m_min = delta;
        if (delta > m_timer->m_max)
            m_timer->m_max = delta;
        m_timer->m_count++;
        m_timer->m_time += delta;
    }

    GCTimer* m_timer;
    double m_start;
};

struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLog("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
    }

    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)

#endif // ENABLE(GC_LOGGING)
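
// For reference, with GC_LOGGING enabled a phase marker like GCPHASE(MarkRoots)
// expands to a function-local logging global plus a scoped timer:
//
//     DEFINE_GC_LOGGING_GLOBAL(GCTimer, MarkRootsTimer, ("MarkRoots"));
//     GCTimerScope MarkRootsTimerScope(&MarkRootsTimer);
//
// so every dynamic execution of the phase is timed, and the per-phase summary
// is printed when the GCTimer global is destroyed at exit.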

static size_t heapSizeForHint(HeapSize heapSize)
{
    if (heapSize == LargeHeap)
        return largeHeapSize;
    ASSERT(heapSize == SmallHeap);
    return smallHeapSize;
}

static inline bool isValidSharedInstanceThreadState(JSGlobalData* globalData)
{
    return globalData->apiLock().currentThreadIsHoldingLock();
}

static inline bool isValidThreadState(JSGlobalData* globalData)
{
    if (globalData->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (globalData->isSharedInstance() && !isValidSharedInstanceThreadState(globalData))
        return false;

    return true;
}

struct CountFunctor {
    typedef size_t ReturnType;

    CountFunctor();
    void count(size_t);
    ReturnType returnValue();

    ReturnType m_count;
};

inline CountFunctor::CountFunctor()
    : m_count(0)
{
}

inline void CountFunctor::count(size_t count)
{
    m_count += count;
}

inline CountFunctor::ReturnType CountFunctor::returnValue()
{
    return m_count;
}

struct ClearMarks : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void ClearMarks::operator()(MarkedBlock* block)
{
    block->clearMarks();
}

struct Sweep : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void Sweep::operator()(MarkedBlock* block)
{
    block->sweep();
}

struct MarkCount : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void MarkCount::operator()(MarkedBlock* block)
{
    count(block->markCount());
}

struct Size : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Size::operator()(MarkedBlock* block)
{
    count(block->markCount() * block->cellSize());
}

struct Capacity : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Capacity::operator()(MarkedBlock* block)
{
    count(block->capacity());
}

struct Count : public CountFunctor {
    void operator()(JSCell*);
};

inline void Count::operator()(JSCell*)
{
    count(1);
}

struct CountIfGlobalObject : CountFunctor {
    void operator()(JSCell*);
};

inline void CountIfGlobalObject::operator()(JSCell* cell)
{
    if (!cell->isObject())
        return;
    if (!asObject(cell)->isGlobalObject())
        return;
    count(1);
}

struct RecordType {
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace
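
// These functors all follow one protocol: the forEach* templates used below
// (MarkedSpace::forEachBlock<F>(), MarkedSpace::forEachCell<F>(),
// Heap::forEachProtectedCell<F>()) construct an F, apply it to every block or
// cell, and return F::returnValue() -- a size_t for the CountFunctor family, a
// PassOwnPtr<TypeCountSet> for RecordType. Heap::objectCount() below, for
// example, is simply m_objectSpace.forEachBlock<MarkCount>().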

Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
    : m_heapSize(heapSize)
    , m_minBytesPerCycle(heapSizeForHint(heapSize))
    , m_sizeAfterLastCollect(0)
    , m_bytesAllocatedLimit(m_minBytesPerCycle)
    , m_bytesAllocated(0)
    , m_bytesAbandoned(0)
    , m_operationInProgress(NoOperation)
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_markListSet(0)
    , m_machineThreads(this)
    , m_sharedData(globalData)
    , m_slotVisitor(m_sharedData)
    , m_weakSet(this)
    , m_handleSet(globalData)
    , m_isSafeToCollect(false)
    , m_globalData(globalData)
    , m_lastGCLength(0)
    , m_lastCodeDiscardTime(WTF::currentTime())
    , m_activityCallback(DefaultGCActivityCallback::create(this))
{
    m_storageSpace.init();
}

Heap::~Heap()
{
    delete m_markListSet;

    m_objectSpace.shrink();
    m_storageSpace.freeAllBlocks();
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The JSGlobalData is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(m_operationInProgress == NoOperation);

    // FIXME: Make this a release-mode crash once we're sure no one's doing this.
    if (size_t size = m_protectedValues.size())
        WTFLogAlways("ERROR: JavaScriptCore heap deallocated while %ld values were still protected", static_cast<unsigned long>(size));

    m_weakSet.finalizeAll();
    canonicalizeCellLivenessData();
    clearMarks();
    sweep();
    m_globalData->smallStrings.finalizeSmallStrings();

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
    m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    if (shouldCollect())
        collect(DoNotSweep);
}
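
// Illustrative caller (hypothetical class; the real entry point is the inline
// Heap::reportExtraMemoryCost(), which forwards unusually large costs here):
//
//     void LargeBufferCell::finishCreation(JSGlobalData& globalData, size_t byteLength)
//     {
//         Base::finishCreation(globalData);
//         // Tell the heap about byteLength bytes the marked space never sees,
//         // so the allocation-based collection trigger accounts for them too.
//         globalData.heap.reportExtraMemoryCost(byteLength);
//     }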

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.10 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}
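
// Worked example of the guess above: if the last collection left 40MB live
// (m_sizeAfterLastCollect), an abandoned graph is charged as 0.10 * 40MB = 4MB,
// which didAbandon() feeds into the same allocation accounting that triggers
// the next collection.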

void Heap::didAbandon(size_t bytes)
{
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
    m_bytesAbandoned += bytes;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_globalData->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_globalData->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}
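
// Typical (hypothetical) usage, mirroring what the C API's JSValueProtect /
// JSValueUnprotect do through these entry points:
//
//     heap.protect(value);
//     // ... code that allocates, and so may collect; value stays alive
//     // because markProtectedObjects() visits m_protectedValues ...
//     heap.unprotect(value);
//
// m_protectedValues is a counted set, so protect()/unprotect() calls nest.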

void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
{
    m_dfgCodeBlocks.jettison(codeBlock);
}

void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->first);
}

void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline RegisterFile& Heap::registerFile()
{
    return m_globalData->interpreter->registerFile();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_globalData));
    ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
    registerFile().gatherConservativeRoots(registerFileRoots);
    size_t registerFileRootCount = registerFileRoots.size();
    JSCell** registerRoots = registerFileRoots.roots();
    for (size_t i = 0; i < registerFileRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
}

void Heap::markRoots(bool fullGC)
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherRegisterFileRoots);
        registerFile().gatherConservativeRoots(registerFileRoots, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_globalData->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

#if ENABLE(GGC)
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        clearMarks();
    }

    m_storageSpace.startedCopying();
    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);
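
        // Draining note: donateAndDrain() processes the visitor's local mark
        // stack and, in PARALLEL_GC builds, periodically donates part of it to
        // the shared pool so other marking threads can help; that is why each
        // root category below ends with a donateAndDrain() call.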
#if ENABLE(GGC)
        {
            size_t dirtyCellCount = dirtyCells.size();
            GCPHASE(VisitDirtyCells);
            GCCOUNTER(DirtyCellCount, dirtyCellCount);
            for (size_t i = 0; i < dirtyCellCount; i++) {
                heapRootVisitor.visitChildren(dirtyCells[i]);
                visitor.donateAndDrain();
            }
        }
#endif

        if (m_globalData->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
                m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        {
            GCPHASE(VisitMachineRoots);
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitRegisterFileRoots);
            visitor.append(registerFileRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_globalData->exception) {
            GCPHASE(MarkingException);
            heapRootVisitor.visit(&m_globalData->exception);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(VisitStrongHandles);
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(HandleStack);
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(TraceCodeBlocks);
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            visitor.donateAndDrain();
        }

#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
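    // Concretely: visiting a live weak handle can mark objects that nothing
    // else reaches, and those objects may in turn keep other weak targets
    // alive, so the loop below iterates to a fixed point -- visit live weak
    // impls, harvest weak references, and re-drain until no new work appears.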
    {
        GCPHASE(VisitingLiveWeakHandles);
        while (true) {
            m_weakSet.visitLiveWeakImpls(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    {
        GCPHASE(VisitingDeadWeakHandles);
        m_weakSet.visitDeadWeakImpls(heapRootVisitor);
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    visitor.doneCopying();
    visitor.reset();
    m_sharedData.reset();
    m_storageSpace.doneCopying();
}

void Heap::clearMarks()
{
    m_objectSpace.forEachBlock<ClearMarks>();
}

void Heap::sweep()
{
    m_objectSpace.forEachBlock<Sweep>();
}

size_t Heap::objectCount()
{
    return m_objectSpace.forEachBlock<MarkCount>();
}

size_t Heap::size()
{
    return m_objectSpace.forEachBlock<Size>() + m_storageSpace.size();
}

size_t Heap::capacity()
{
    return m_objectSpace.forEachBlock<Capacity>() + m_storageSpace.capacity();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return m_objectSpace.forEachCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return m_objectSpace.forEachCell<RecordType>();
}

void Heap::discardAllCompiledCode()
{
    // If JavaScript is running, it's not safe to recompile, since we'll end
    // up throwing away code that is live on the stack.
    if (m_globalData->dynamicGlobalObject)
        return;

    for (FunctionExecutable* current = m_functions.head(); current; current = current->next())
        current->discardCode();
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(DoSweep);
}

static double minute = 60.0;

void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(globalData()->apiLock().currentThreadIsHoldingLock());
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    if (m_activityCallback)
        m_activityCallback->willCollect();

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        discardAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

#if ENABLE(GGC)
    bool fullGC = sweepToggle == DoSweep;
    if (!fullGC)
        fullGC = (capacity() > 4 * m_sizeAfterLastCollect);
#else
    bool fullGC = true;
#endif
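    // E.g. with a 64MB capacity but only 12MB live after the last collection,
    // 64MB > 4 * 12MB upgrades even a young (DoNotSweep) request to a full
    // collection, so heavily fragmented heaps get reclaimed promptly.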
    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }

    markRoots(fullGC);

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(FinalizeWeakHandles);
        m_weakSet.sweep();
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(ResetAllocator);
        resetAllocators();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        sweep();
        m_objectSpace.shrink();
        m_weakSet.shrink();
        m_bytesAbandoned = 0;
    }

    // To avoid pathological GC churn in large heaps, we set the new allocation
    // limit to be the current size of the heap. This heuristic
    // is a bit arbitrary. Using the current size of the heap after this
    // collection gives us a 2X multiplier, which is a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
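    // E.g. a collection that leaves 100MB live sets m_bytesAllocatedLimit to
    // 100MB, so the next collection fires after roughly another 100MB of
    // allocation (2X peak); max() keeps small heaps at m_minBytesPerCycle.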
    size_t newSize = size();
    if (fullGC) {
        m_sizeAfterLastCollect = newSize;
        m_bytesAllocatedLimit = max(newSize, m_minBytesPerCycle);
    }

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;
    if (m_operationInProgress != Collection)
        CRASH();
    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();
}

void Heap::canonicalizeCellLivenessData()
{
    m_objectSpace.canonicalizeCellLivenessData();
}

void Heap::resetAllocators()
{
    m_objectSpace.resetAllocators();
    m_weakSet.resetAllocator();
}

void Heap::setActivityCallback(GCActivityCallback* activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback;
}

void Heap::didAllocate(size_t bytes)
{
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
    m_bytesAllocated += bytes;
}

bool Heap::isValidAllocation(size_t bytes)
{
    if (!isValidThreadState(m_globalData))
        return false;

    if (bytes > MarkedSpace::maxCellSize)
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
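
// Typical (hypothetical) client of the finalizer hook above:
//
//     static void finalizeWrapper(JSCell* cell)
//     {
//         // Release a non-GC resource owned by the dying cell.
//         static_cast<HypotheticalWrapper*>(cell)->releaseExternalResource();
//     }
//     ...
//     heap.addFinalizer(cell, finalizeWrapper);
//
// The WeakImpl allocated in addFinalizer() tracks the cell; when the cell dies,
// FinalizerOwner::finalize() runs the callback and deallocates the WeakImpl.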

void Heap::addFunctionExecutable(FunctionExecutable* executable)
{
    m_functions.append(executable);
}

void Heap::removeFunctionExecutable(FunctionExecutable* executable)
{
    m_functions.remove(executable);
}

} // namespace JSC