/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "DFGWorklist.h"
#include "DelayedReleaseScope.h"
#include "EdenGCActivityCallback.h"
#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
#include "GCIncomingRefCountedSetInlines.h"
#include "HeapIterationScope.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "JSCInlines.h"
#include "JSVirtualMachineInternal.h"
#include "RecursiveAllocationScope.h"
#include "Tracing.h"
#include "UnlinkedCodeBlock.h"
#include "VM.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
#include <wtf/ProcessID.h>

using namespace std;
using namespace JSC;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.

#define ENABLE_GC_LOGGING 0

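// When GC logging is compiled in, GCPHASE(Name) defines a function-local static
// GCTimer plus a scoped GCTimerScope that times the enclosing phase; per-phase
// min/avg/max statistics are printed when the timers are destroyed at exit.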
#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

struct GCTimer {
    GCTimer(const char* name)
        : m_name(name)
    {
    }
    ~GCTimer()
    {
        logData(m_allCollectionData, "(All)");
        logData(m_edenCollectionData, "(Eden)");
        logData(m_fullCollectionData, "(Full)");
    }

    struct TimeRecord {
        TimeRecord()
            : m_time(0)
            , m_min(std::numeric_limits<double>::infinity())
            , m_max(0)
            , m_count(0)
        {
        }

        double m_time;
        double m_min;
        double m_max;
        size_t m_count;
    };

    void logData(const TimeRecord& data, const char* extra)
    {
        dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
            getCurrentProcessID(),
            m_name, extra,
            data.m_time * 1000,
            data.m_time * 1000 / data.m_count,
            data.m_min * 1000,
            data.m_max * 1000,
            data.m_count);
    }

    void updateData(TimeRecord& data, double duration)
    {
        if (duration < data.m_min)
            data.m_min = duration;
        if (duration > data.m_max)
            data.m_max = duration;
        data.m_count++;
        data.m_time += duration;
    }

    void didFinishPhase(HeapOperation collectionType, double duration)
    {
        TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
        updateData(data, duration);
        updateData(m_allCollectionData, duration);
    }

    TimeRecord m_allCollectionData;
    TimeRecord m_fullCollectionData;
    TimeRecord m_edenCollectionData;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer, HeapOperation collectionType)
        : m_timer(timer)
        , m_start(WTF::monotonicallyIncreasingTime())
        , m_collectionType(collectionType)
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::monotonicallyIncreasingTime() - m_start;
        m_timer->didFinishPhase(m_collectionType, delta);
    }
    GCTimer* m_timer;
    double m_start;
    HeapOperation m_collectionType;
};

struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
    }
    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif

static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}

static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
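    // Illustrative arithmetic (example figures, not from the original source):
    // with 8 GB of RAM, a 1 GB heap doubles to 2 GB, a 3 GB heap grows 1.5x to
    // 4.5 GB, and a 5 GB heap grows 1.25x to 6.25 GB.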
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}

static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->currentThreadIsHoldingAPILock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}

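// Cell functors: these are applied cell-by-cell via MarkedSpace iteration (see
// the forEachLiveCell/forEachDeadCell calls below); the CountFunctor-based ones
// accumulate a tally that the iteration returns.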
struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    void operator()(JSCell* cell) {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace

Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_sizeAfterLastFullCollect(0)
    , m_sizeBeforeLastFullCollect(0)
    , m_sizeAfterLastEdenCollect(0)
    , m_sizeBeforeLastEdenCollect(0)
    , m_bytesAllocatedThisCycle(0)
    , m_bytesAbandonedSinceLastFullCollect(0)
    , m_maxEdenSize(m_minBytesPerCycle)
    , m_maxHeapSize(m_minBytesPerCycle)
    , m_shouldDoFullCollection(false)
    , m_totalBytesVisited(0)
    , m_totalBytesCopied(0)
    , m_operationInProgress(NoOperation)
    , m_blockAllocator()
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_extraMemoryUsage(0)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_handleSet(vm)
    , m_codeBlocks(m_blockAllocator)
    , m_isSafeToCollect(false)
    , m_writeBarrierBuffer(256)
    , m_vm(vm)
    // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
    // schedule the timer if we've never done a collection.
    , m_lastFullGCLength(0.01)
    , m_lastEdenGCLength(0.01)
    , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
    , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
#if ENABLE(GGC)
    , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
#else
    , m_edenActivityCallback(m_fullActivityCallback)
#endif
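    // Note (added commentary): without generational GC (GGC), eden collections
    // share the full-collection activity timer, so scheduled GCs are full GCs.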
    , m_sweeper(IncrementalSweeper::create(this))
    , m_deferralDepth(0)
{
    m_storageSpace.init();
}

Heap::~Heap()
{
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->entryScope);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    collectIfNecessaryOrDefer();
}

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.1 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}

void Heap::didAbandon(size_t bytes)
{
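    // Credit the full-GC timer with everything allocated or abandoned since the
    // last full collection, so abandoned memory hastens the next full GC.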
    if (m_fullActivityCallback) {
        m_fullActivityCallback->didAllocate(
            m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    }
    m_bytesAbandonedSinceLastFullCollect += bytes;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
{
    if (m_arrayBuffers.addReference(cell, buffer)) {
        collectIfNecessaryOrDefer();
        didAllocate(buffer->gcSizeEstimateInBytes());
    }
}

void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    GCPHASE(FinalizeUnconditionalFinalizers);
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::willStartIterating()
{
    m_objectSpace.willStartIterating();
}

void Heap::didFinishIterating()
{
    m_objectSpace.didFinishIterating();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        registerRoots[i]->setMarked();
        roots.add(registerRoots[i]);
    }
}

void Heap::markRoots(double gcStartTime)
{
    SamplingRegion samplingRegion("Garbage Collection: Marking");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

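    // Under GGC, an eden collection starts with the remembered set (old-space
    // cells written to since the last collection) already on the mark stack via
    // addToRememberedSet(); snapshot it here so clearRememberedSet() can reset
    // the per-cell remembered bits once marking completes.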
#if ENABLE(GGC)
    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
    m_slotVisitor.markStack().fillVector(rememberedSet);
#else
    Vector<const JSCell*> rememberedSet;
#endif

    if (m_operationInProgress == EdenCollection)
        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
    else
        m_codeBlocks.clearMarksForFullCollection();

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    void* dummy;
    ALLOCATE_AND_GET_REGISTER_STATE(registers);
    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
    gatherStackRoots(conservativeRoots, &dummy, registers);
    gatherJSStackRoots(conservativeRoots);
    gatherScratchBufferRoots(conservativeRoots);

    sanitizeStackForVM(m_vm);

    clearLivenessData();

    m_sharedData.didStartMarking();
    m_slotVisitor.didStartMarking();
    HeapRootVisitor heapRootVisitor(m_slotVisitor);

    {
        ParallelModeEnabler enabler(m_slotVisitor);

        visitExternalRememberedSet();
        visitSmallStrings();
        visitConservativeRoots(conservativeRoots);
        visitProtectedObjects(heapRootVisitor);
        visitTempSortVectors(heapRootVisitor);
        visitArgumentBuffers(heapRootVisitor);
        visitException(heapRootVisitor);
        visitStrongHandles(heapRootVisitor);
        visitHandleStack(heapRootVisitor);
        traceCodeBlocksAndJITStubRoutines();
        converge();
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    visitWeakHandles(heapRootVisitor);

    clearRememberedSet(rememberedSet);
    m_sharedData.didFinishMarking();
    updateObjectCounts(gcStartTime);
    resetVisitors();
}

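// Added commentary: the copy phase compacts CopiedSpace by evacuating live
// backing stores into fresh blocks; whether it runs at all is decided by
// shouldDoCopyPhase() below.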
void Heap::copyBackingStores()
{
    if (m_operationInProgress == EdenCollection)
        m_storageSpace.startedCopying<EdenCollection>();
    else {
        ASSERT(m_operationInProgress == FullCollection);
        m_storageSpace.startedCopying<FullCollection>();
    }

    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}

void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy, MachineThreads::RegisterState& registers)
{
    GCPHASE(GatherStackRoots);
    m_jitStubRoutines.clearMarks();
    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy, registers);
}

void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
#if !ENABLE(JIT)
    GCPHASE(GatherJSStackRoots);
    stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
#else
    UNUSED_PARAM(roots);
#endif
}

void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
#if ENABLE(DFG_JIT)
    GCPHASE(GatherScratchBufferRoots);
    m_vm->gatherConservativeRoots(roots);
#else
    UNUSED_PARAM(roots);
#endif
}

void Heap::clearLivenessData()
{
    GCPHASE(ClearLivenessData);
    m_objectSpace.clearNewlyAllocated();
    m_objectSpace.clearMarks();
}

void Heap::visitExternalRememberedSet()
{
#if JSC_OBJC_API_ENABLED
    scanExternalRememberedSet(*m_vm, m_slotVisitor);
#endif
}

void Heap::visitSmallStrings()
{
    GCPHASE(VisitSmallStrings);
    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Small strings:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitConservativeRoots(ConservativeRoots& roots)
{
    GCPHASE(VisitConservativeRoots);
    m_slotVisitor.append(roots);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Conservative Roots:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitCompilerWorklistWeakReferences()
{
#if ENABLE(DFG_JIT)
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("DFG Worklists:\n", m_slotVisitor);
#endif
}

void Heap::removeDeadCompilerWorklistEntries()
{
#if ENABLE(DFG_JIT)
    GCPHASE(FinalizeDFGWorklists);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->removeDeadPlans(*m_vm);
#endif
}

void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitProtectedObjects);

    for (auto& pair : m_protectedValues)
        heapRootVisitor.visit(&pair.key);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Protected Objects:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitTempSortVectors);
    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>*> VectorOfValueStringVectors;

    for (auto* vector : m_tempSortingVectors) {
        for (auto& valueStringPair : *vector) {
            if (valueStringPair.first)
                heapRootVisitor.visit(&valueStringPair.first);
        }
    }

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Temp Sort Vectors:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingArgumentBuffers);
    if (!m_markListSet || !m_markListSet->size())
        return;

    MarkedArgumentBuffer::markLists(visitor, *m_markListSet);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Argument Buffers:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitException(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingException);
    if (!m_vm->exception())
        return;

    visitor.visit(m_vm->addressOfException());

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Exceptions:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitStrongHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitStrongHandles);
    m_handleSet.visitStrongHandles(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Strong Handles:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitHandleStack(HeapRootVisitor& visitor)
{
    GCPHASE(VisitHandleStack);
    m_handleStack.visit(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Handle Stack:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::traceCodeBlocksAndJITStubRoutines()
{
    GCPHASE(TraceCodeBlocksAndJITStubRoutines);
    m_codeBlocks.traceMarked(m_slotVisitor);
    m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::converge()
{
#if ENABLE(PARALLEL_GC)
    GCPHASE(Convergence);
    m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
}

void Heap::visitWeakHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitingLiveWeakHandles);
    while (true) {
        m_objectSpace.visitWeakSets(visitor);
        harvestWeakReferences();
        visitCompilerWorklistWeakReferences();
        m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
        if (m_slotVisitor.isEmpty())
            break;

        if (Options::logGC() == GCLogging::Verbose)
            dataLog("Live Weak Handles:\n", m_slotVisitor);

        {
            ParallelModeEnabler enabler(m_slotVisitor);
            m_slotVisitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
            m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
        }
    }
}

void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
{
#if ENABLE(GGC)
    GCPHASE(ClearRememberedSet);
    for (auto* cell : rememberedSet) {
        MarkedBlock::blockFor(cell)->clearRemembered(cell);
        const_cast<JSCell*>(cell)->setRemembered(false);
    }
#else
    UNUSED_PARAM(rememberedSet);
#endif
}

void Heap::updateObjectCounts(double gcStartTime)
{
    GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());

    if (Options::logGC() == GCLogging::Verbose) {
        size_t visitCount = m_slotVisitor.visitCount();
#if ENABLE(PARALLEL_GC)
        visitCount += m_sharedData.childVisitCount();
#endif
        dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
    }

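    // Eden collections accumulate this cycle's survivors on top of the running
    // totals; full collections recompute the totals from scratch.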
    if (m_operationInProgress == EdenCollection) {
        m_totalBytesVisited += m_slotVisitor.bytesVisited();
        m_totalBytesCopied += m_slotVisitor.bytesCopied();
    } else {
        ASSERT(m_operationInProgress == FullCollection);
        m_totalBytesVisited = m_slotVisitor.bytesVisited();
        m_totalBytesCopied = m_slotVisitor.bytesCopied();
    }
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif
}

void Heap::resetVisitors()
{
    m_slotVisitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}

size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::extraSize()
{
    return m_extraMemoryUsage + m_arrayBuffers.size();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size() + extraSize();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
}

size_t Heap::sizeAfterCollect()
{
    // The result here may not agree with the normal Heap::size().
    // This is due to the fact that we only count live copied bytes
    // rather than all used (including dead) copied bytes, thus it's
    // always the case that m_totalBytesCopied <= m_storageSpace.size().
    ASSERT(m_totalBytesCopied <= m_storageSpace.size());
    return m_totalBytesVisited + m_totalBytesCopied + extraSize();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
}

void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->entryScope)
        return;

    // If we have things on any worklist, then don't delete code. This is kind of
    // a weird heuristic. It's definitely not safe to throw away code that is on
    // the worklist. But this change was made in a hurry so we just avoid throwing
    // away any code if there is any code on any worklist. I suspect that this
    // might not actually be too dumb: if there is code on worklists then that
    // means that we are running some hot JS code right now. Maybe causing
    // recompilations isn't a good idea.
#if ENABLE(DFG_JIT)
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            if (worklist->isActiveForVM(*vm()))
                return;
        }
    }
#endif // ENABLE(DFG_JIT)

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
    m_codeBlocks.clearMarksForFullCollection();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
}

void Heap::deleteAllUnlinkedFunctionCode()
{
    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
    }
}

void Heap::clearUnmarkedExecutables()
{
    GCPHASE(ClearUnmarkedExecutables);
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }
}

void Heap::deleteUnmarkedCompiledCode()
{
    GCPHASE(DeleteCodeBlocks);
    clearUnmarkedExecutables();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}

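// Presumably reached from the write-barrier slow path (an assumption based on
// the name and the isRemembered() check): an old-space cell that has been
// written to is queued for re-scanning by the next eden collection.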
void Heap::addToRememberedSet(const JSCell* cell)
{
    ASSERT(cell);
    ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
    if (isRemembered(cell))
        return;
    MarkedBlock::blockFor(cell)->setRemembered(cell);
    const_cast<JSCell*>(cell)->setRemembered(true);
    m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(FullCollection);

    SamplingRegion samplingRegion("Garbage Collection: Sweeping");
    DelayedReleaseScope delayedReleaseScope(m_objectSpace);
    m_objectSpace.sweep();
    m_objectSpace.shrink();
}

static double minute = 60.0;

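// Added summary: a collection runs as a fixed pipeline. Suspend compiler
// threads, mark from roots, reap weak sets, sweep array buffers, copy backing
// stores, run finalizers, delete dead code, then reset allocators and
// recompute allocation limits.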
void Heap::collect(HeapOperation collectionType)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif

    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC: ");
        before = currentTimeMS();
    }

    SamplingRegion samplingRegion("Garbage Collection");

    RELEASE_ASSERT(!m_deferralDepth);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    suspendCompilerThreads();
    willStartCollection(collectionType);
    GCPHASE(Collect);

    double gcStartTime = WTF::monotonicallyIncreasingTime();

    deleteOldCode(gcStartTime);
    flushOldStructureIDTables();
    stopAllocation();
    flushWriteBarrierBuffer();

    markRoots(gcStartTime);

    JAVASCRIPTCORE_GC_MARKED();

    reapWeakHandles();
    sweepArrayBuffers();
    snapshotMarkedSpace();

    copyBackingStores();

    finalizeUnconditionalFinalizers();
    removeDeadCompilerWorklistEntries();
    deleteUnmarkedCompiledCode();
    deleteSourceProviderCaches();
    notifyIncrementalSweeper();
    rememberCurrentlyExecutingCodeBlocks();

    resetAllocators();
    updateAllocationLimits();
    didFinishCollection(gcStartTime);
    resumeCompilerThreads();

    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms]\n");
    }
}

void Heap::suspendCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(SuspendCompilerThreads);
    ASSERT(m_suspendedCompilerWorklists.isEmpty());
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            m_suspendedCompilerWorklists.append(worklist);
            worklist->suspendAllThreads();
        }
    }
#endif
}

void Heap::willStartCollection(HeapOperation collectionType)
{
    GCPHASE(StartingCollection);
    if (shouldDoFullCollection(collectionType)) {
        m_operationInProgress = FullCollection;
        m_slotVisitor.clearMarkStack();
        m_shouldDoFullCollection = false;
        if (Options::logGC())
            dataLog("FullCollection, ");
    } else {
        m_operationInProgress = EdenCollection;
        if (Options::logGC())
            dataLog("EdenCollection, ");
    }
    if (m_operationInProgress == FullCollection) {
        m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
        m_extraMemoryUsage = 0;

        if (m_fullActivityCallback)
            m_fullActivityCallback->willCollect();
    } else {
        ASSERT(m_operationInProgress == EdenCollection);
        m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
    }

    if (m_edenActivityCallback)
        m_edenActivityCallback->willCollect();
}

void Heap::deleteOldCode(double gcStartTime)
{
    if (m_operationInProgress == EdenCollection)
        return;

    GCPHASE(DeleteOldCode);
    if (gcStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }
}

void Heap::flushOldStructureIDTables()
{
    GCPHASE(FlushOldStructureIDTables);
    m_structureIDTable.flushOldTables();
}

void Heap::flushWriteBarrierBuffer()
{
    GCPHASE(FlushWriteBarrierBuffer);
    if (m_operationInProgress == EdenCollection) {
        m_writeBarrierBuffer.flush(*this);
        return;
    }
    m_writeBarrierBuffer.reset();
}

void Heap::stopAllocation()
{
    GCPHASE(StopAllocation);
    m_objectSpace.stopAllocating();
    if (m_operationInProgress == FullCollection)
        m_storageSpace.didStartFullCollection();
}

void Heap::reapWeakHandles()
{
    GCPHASE(ReapingWeakHandles);
    m_objectSpace.reapWeakSets();
}

void Heap::sweepArrayBuffers()
{
    GCPHASE(SweepingArrayBuffers);
    m_arrayBuffers.sweep();
}

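// Full collections snapshot the block list so the IncrementalSweeper can sweep
// the blocks lazily, on a timer, after this collection returns; see
// notifyIncrementalSweeper() below.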
struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
    MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
        : m_index(0)
        , m_blocks(blocks)
    {
    }

    void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }

    size_t m_index;
    Vector<MarkedBlock*>& m_blocks;
};

void Heap::snapshotMarkedSpace()
{
    GCPHASE(SnapshotMarkedSpace);
    if (m_operationInProgress != FullCollection)
        return;

    m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
    MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
    m_objectSpace.forEachBlock(functor);
}

void Heap::deleteSourceProviderCaches()
{
    GCPHASE(DeleteSourceProviderCaches);
    m_vm->clearSourceProviderCaches();
}

void Heap::notifyIncrementalSweeper()
{
    GCPHASE(NotifyIncrementalSweeper);
    if (m_operationInProgress != FullCollection)
        return;
    m_sweeper->startSweeping(m_blockSnapshot);
}

void Heap::rememberCurrentlyExecutingCodeBlocks()
{
    GCPHASE(RememberCurrentlyExecutingCodeBlocks);
    m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
}

void Heap::resetAllocators()
{
    GCPHASE(ResetAllocators);
    m_objectSpace.resetAllocators();
}

void Heap::updateAllocationLimits()
{
    GCPHASE(UpdateAllocationLimits);
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    if (m_operationInProgress == FullCollection) {
        // To avoid pathological GC churn in very small and very large heaps, we set
        // the new allocation limit based on the current size of the heap, with a
        // fixed minimum.
        m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastFullCollect = currentHeapSize;
        m_bytesAbandonedSinceLastFullCollect = 0;
    } else {
        ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastEdenCollect = currentHeapSize;
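        // Once survivors squeeze eden below one third of the max heap size,
        // request a full collection to reclaim old-generation garbage.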
        double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
        double minEdenToOldGenerationRatio = 1.0 / 3.0;
        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
            m_shouldDoFullCollection = true;
        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (m_fullActivityCallback) {
            ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
            m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
        }
    }

    m_sizeAfterLastCollect = currentHeapSize;
    m_bytesAllocatedThisCycle = 0;

    if (Options::logGC())
        dataLog(currentHeapSize / 1024, " kb, ");
}

void Heap::didFinishCollection(double gcStartTime)
{
    GCPHASE(FinishingCollection);
    double gcEndTime = WTF::monotonicallyIncreasingTime();
    if (m_operationInProgress == FullCollection)
        m_lastFullGCLength = gcEndTime - gcStartTime;
    else
        m_lastEdenGCLength = gcEndTime - gcStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);

    if (Options::logGC() == GCLogging::Verbose)
        GCLogging::dumpObjectGraph(this);
}

void Heap::resumeCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(ResumeCompilerThreads);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->resumeAllThreads();
    m_suspendedCompilerWorklists.clear();
#endif
}

void Heap::markDeadObjects()
{
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
}

void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
{
    m_fullActivityCallback = activityCallback;
}

void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
{
    m_edenActivityCallback = activityCallback;
}

GCActivityCallback* Heap::fullActivityCallback()
{
    return m_fullActivityCallback.get();
}

GCActivityCallback* Heap::edenActivityCallback()
{
    return m_edenActivityCallback.get();
}

void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
{
    m_sweeper = sweeper;
}

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}

void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
    if (m_fullActivityCallback)
        m_fullActivityCallback->setEnabled(enable);
    if (m_edenActivityCallback)
        m_edenActivityCallback->setEnabled(enable);
}

void Heap::didAllocate(size_t bytes)
{
    if (m_edenActivityCallback)
        m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    m_bytesAllocatedThisCycle += bytes;
}

bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}

void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}

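// Zombification scribbles a recognizable pattern (zombifiedBits) over each dead
// cell's payload so that stale pointers into collected objects fail loudly
// instead of silently reading reused memory.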
class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = zombifiedBits;
    }
};

void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        DelayedReleaseScope delayedReleaseScope(m_objectSpace);
        m_objectSpace.zombifySweep();
    }
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
}

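// Flushing here empties the buffer into the remembered set before adding the
// new cell; presumably this overload is the overflow path for the 256-entry
// WriteBarrierBuffer created in the constructor.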
void Heap::flushWriteBarrierBuffer(JSCell* cell)
{
#if ENABLE(GGC)
    m_writeBarrierBuffer.flush(*this);
    m_writeBarrierBuffer.add(cell);
#else
    UNUSED_PARAM(cell);
#endif
}

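// Without GGC, every collection is a full collection. With GGC, AnyCollection
// defers to m_shouldDoFullCollection, which updateAllocationLimits() sets once
// eden shrinks below one third of the max heap size.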
bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
{
#if ENABLE(GGC)
    if (Options::alwaysDoFullCollection())
        return true;

    switch (requestedCollectionType) {
    case EdenCollection:
        return false;
    case FullCollection:
        return true;
    case AnyCollection:
        return m_shouldDoFullCollection;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return false;
#else
    UNUSED_PARAM(requestedCollectionType);
    return true;
#endif
}

} // namespace JSC