/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "VM.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Operations.h"
#include "Tracing.h"
#include "UnlinkedCodeBlock.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
using namespace std;
using namespace JSC;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)
struct GCTimer {
    GCTimer(const char* name)
        : m_time(0)
        , m_min(100000000)
        , m_max(0)
        , m_count(0)
        , m_name(name)
    {
    }
    ~GCTimer()
    {
        dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min * 1000, m_max * 1000);
    }
    double m_time;
    double m_min;
    double m_max;
    size_t m_count;
    const char* m_name;
};
struct GCTimerScope {
    GCTimerScope(GCTimer* timer)
        : m_timer(timer)
        , m_start(WTF::currentTime())
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::currentTime() - m_start;
        if (delta < m_timer->m_min)
            m_timer->m_min = delta;
        if (delta > m_timer->m_max)
            m_timer->m_max = delta;
        m_timer->m_count++;
        m_timer->m_time += delta;
    }
    GCTimer* m_timer;
    double m_start;
};
struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
    }
    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};
#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)

#endif // ENABLE(GC_LOGGING)
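
// Illustrative note (not in the original file): with GC_LOGGING enabled, a phase such as
// GCPHASE(Canonicalize) expands to roughly
//
//     static GCTimer CanonicalizeTimer("Canonicalize");
//     GCTimerScope CanonicalizeTimerScope(&CanonicalizeTimer);
//
// i.e. a function-local static timer that accumulates totals plus a scoped RAII object that
// records this phase's duration into it when the scope exits; the totals are dumped when the
// timer is destroyed at process exit. With GC_LOGGING disabled, all three macros compile away
// to no-ops.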
static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}
static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}
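
// Worked example (illustrative, not in the original file): with 8 GB of RAM, a 1 GB heap is
// under ramSize / 4, so the proportional target is 2 * 1 GB = 2 GB and roughly another 1 GB of
// allocation is allowed before the next collection (see m_bytesAllocatedLimit in Heap::collect()).
// A 2.5 GB heap falls between ramSize / 4 and ramSize / 2 and gets a 1.5x target of 3.75 GB,
// while anything at or above ramSize / 2 only grows by 1.25x, which is how the heap is kept from
// crowding out the DOM, rendering, and networking allocations mentioned above.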
static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->apiLock().currentThreadIsHoldingLock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}
struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    void operator()(JSCell* cell) {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};
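
// Illustrative note (not in the original file): these functors are driven by the iteration
// templates used later in this file. For example, Heap::globalObjectCount() below runs
//
//     m_objectSpace.forEachLiveCell<CountIfGlobalObject>();
//
// which applies the functor to every live cell and returns the accumulated count through the
// functor's returnValue().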
class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace
Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_bytesAllocatedLimit(m_minBytesPerCycle)
    , m_bytesAllocated(0)
    , m_bytesAbandoned(0)
    , m_operationInProgress(NoOperation)
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_handleSet(vm)
    , m_isSafeToCollect(false)
    , m_vm(vm)
    , m_lastGCLength(0)
    , m_lastCodeDiscardTime(WTF::currentTime())
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_sweeper(IncrementalSweeper::create(this))
{
    m_storageSpace.init();
}
bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}
// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->dynamicGlobalObject);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
    m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
}
void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    if (shouldCollect())
        collect(DoNotSweep);
}
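
// Illustrative usage (not part of the original file): a cell that owns a large out-of-line
// buffer reports that cost through the inline fast path in Heap.h (reportExtraMemoryCost),
// which only calls this slow case for unusually large amounts. The owner type below is
// hypothetical.
//
//     void HypotheticalBufferOwner::finishCreation(VM& vm, size_t externalBytes)
//     {
//         Base::finishCreation(vm);
//         vm.heap.reportExtraMemoryCost(externalBytes);
//     }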
void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.10 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}
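
// Illustrative usage (not part of the original file): an embedder that discards a whole object
// graph at once, for example when tearing down a page's script objects, can nudge the collector
// instead of waiting for ordinary allocation to trigger it:
//
//     pageScriptObjects.clear();             // hypothetical owner of the abandoned graph
//     vm->heap.reportAbandonedObjectGraph(); // treats ~10% of the last heap size as newly allocated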
void Heap::didAbandon(size_t bytes)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#else
    m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#endif // PLATFORM(IOS)
    m_bytesAbandoned += bytes;
}
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}
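
// Illustrative usage (not part of the original file): native code that stores a JSValue outside
// the JS heap can keep it alive with the protect count, assuming the usual JSLockHolder pattern
// for holding the API lock that both functions assert:
//
//     JSLockHolder locker(vm);
//     vm->heap.protect(value);    // keeps value alive even with no references from JS
//     // ... use value from native code ...
//     vm->heap.unprotect(value);  // must balance protect(); true once the last count is released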
void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
{
    m_dfgCodeBlocks.jettison(codeBlock);
}
void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->key);
}
void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}
void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}
void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::canonicalizeCellLivenessData()
{
    m_objectSpace.canonicalizeCellLivenessData();
}
void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
}
void Heap::markRoots()
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::currentTime();
#endif

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(
            stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_vm->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

    m_objectSpace.clearMarks();

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);

        if (m_vm->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_vm->codeBlocksBeingCompiled.size(); i++)
                m_vm->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        m_vm->smallStrings.visitStrongReferences(visitor);

        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_vm->exception) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(&m_vm->exception);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }

#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime);
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
void Heap::copyBackingStores()
{
    m_storageSpace.startedCopying();
    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}
size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return m_objectSpace.forEachLiveCell<RecordType>();
}
void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->dynamicGlobalObject)
        return;

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    m_dfgCodeBlocks.clearMarks();
    m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
}
void Heap::deleteUnmarkedCompiledCode()
{
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }

    m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}
void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(DoSweep);
}

static double minute = 60.0;
void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(vm()->apiLock().currentThreadIsHoldingLock());
    RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    m_operationInProgress = Collection;

#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->willCollect();
#else
    m_activityCallback->willCollect();
#endif // PLATFORM(IOS)

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

    {
        GCPHASE(Canonicalize);
        m_objectSpace.canonicalizeCellLivenessData();
    }

    markRoots();

    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(finalizeSmallStrings);
        m_vm->smallStrings.finalizeSmallStrings();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_vm->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }

    size_t currentHeapSize = size();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);
}
void Heap::markDeadObjects()
{
    m_objectSpace.forEachDeadCell<MarkObject>();
}
void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}
#if PLATFORM(IOS)
void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
{
    m_sweeper = sweeper;
}
#endif // PLATFORM(IOS)

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}
void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->setEnabled(enable);
#else
    activityCallback()->setEnabled(enable);
#endif // PLATFORM(IOS)
}
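
// Illustrative usage (not part of the original file): an embedder can suspend timer-driven
// collections around a latency-sensitive section and re-enable them afterwards:
//
//     vm->heap.setGarbageCollectionTimerEnabled(false);
//     // ... latency-sensitive work ...
//     vm->heap.setGarbageCollectionTimerEnabled(true);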
void Heap::didAllocate(size_t bytes)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#else
    m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#endif // PLATFORM(IOS)
    m_bytesAllocated += bytes;
}
bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}
void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}
class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = reinterpret_cast<void*>(0xbbadbeef);
    }
};
void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    m_objectSpace.sweep();
    m_objectSpace.forEachDeadCell<Zombify>();
}

} // namespace JSC