/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "DFGWorklist.h"
#include "DelayedReleaseScope.h"
#include "EdenGCActivityCallback.h"
#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
#include "GCIncomingRefCountedSetInlines.h"
#include "HeapIterationScope.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "JSCInlines.h"
#include "JSVirtualMachineInternal.h"
#include "RecursiveAllocationScope.h"
#include "Tracing.h"
#include "UnlinkedCodeBlock.h"
#include "VM.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
#include <wtf/ProcessID.h>

using namespace std;
using namespace JSC;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.

#define ENABLE_GC_LOGGING 0

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

struct GCTimer {
    GCTimer(const char* name)
        : m_name(name)
    {
    }
    ~GCTimer()
    {
        logData(m_allCollectionData, "(All)");
        logData(m_edenCollectionData, "(Eden)");
        logData(m_fullCollectionData, "(Full)");
    }

    struct TimeRecord {
        TimeRecord()
            : m_time(0)
            , m_min(std::numeric_limits<double>::infinity())
            , m_max(0)
            , m_count(0)
        {
        }

        double m_time;
        double m_min;
        double m_max;
        size_t m_count;
    };

    void logData(const TimeRecord& data, const char* extra)
    {
        dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
            getCurrentProcessID(),
            m_name, extra,
            data.m_time * 1000,
            data.m_time * 1000 / data.m_count,
            data.m_min * 1000,
            data.m_max * 1000,
            data.m_count);
    }

    void updateData(TimeRecord& data, double duration)
    {
        if (duration < data.m_min)
            data.m_min = duration;
        if (duration > data.m_max)
            data.m_max = duration;
        data.m_count++;
        data.m_time += duration;
    }

    void didFinishPhase(HeapOperation collectionType, double duration)
    {
        TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
        updateData(data, duration);
        updateData(m_allCollectionData, duration);
    }

    TimeRecord m_allCollectionData;
    TimeRecord m_fullCollectionData;
    TimeRecord m_edenCollectionData;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer, HeapOperation collectionType)
        : m_timer(timer)
        , m_start(WTF::monotonicallyIncreasingTime())
        , m_collectionType(collectionType)
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::monotonicallyIncreasingTime() - m_start;
        m_timer->didFinishPhase(m_collectionType, delta);
    }
    GCTimer* m_timer;
    double m_start;
    HeapOperation m_collectionType;
};

struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
    }
    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif

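// As a concrete example of the logging machinery (with GC_LOGGING enabled and
// the clang pragmas elided), GCPHASE(MarkRoots) expands to:
//
//     static GCTimer MarkRootsTimer("MarkRoots");
//     GCTimerScope MarkRootsTimerScope(&MarkRootsTimer, m_operationInProgress);
//
// so each phase owns one function-local static timer that accumulates
// Eden/Full/All statistics across collections and logs them at process exit.
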
static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}

static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}
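
// Worked example: with ramSize = 1 GB, a 100 MB heap doubles to 200 MB
// (it is under 1/4 of RAM), a 300 MB heap grows 1.5x to 450 MB (under 1/2),
// and a 600 MB heap grows only 1.25x to 750 MB.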

static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->currentThreadIsHoldingAPILock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}

struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    void operator()(JSCell* cell) {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace

Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_sizeAfterLastFullCollect(0)
    , m_sizeBeforeLastFullCollect(0)
    , m_sizeAfterLastEdenCollect(0)
    , m_sizeBeforeLastEdenCollect(0)
    , m_bytesAllocatedThisCycle(0)
    , m_bytesAbandonedSinceLastFullCollect(0)
    , m_maxEdenSize(m_minBytesPerCycle)
    , m_maxHeapSize(m_minBytesPerCycle)
    , m_shouldDoFullCollection(false)
    , m_totalBytesVisited(0)
    , m_totalBytesCopied(0)
    , m_operationInProgress(NoOperation)
    , m_blockAllocator()
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_extraMemoryUsage(0)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_handleSet(vm)
    , m_codeBlocks(m_blockAllocator)
    , m_isSafeToCollect(false)
    , m_writeBarrierBuffer(256)
    , m_vm(vm)
    // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
    // schedule the timer if we've never done a collection.
    , m_lastFullGCLength(0.01)
    , m_lastEdenGCLength(0.01)
    , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
    , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
#if ENABLE(GGC)
    , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
#else
    , m_edenActivityCallback(m_fullActivityCallback)
#endif
    , m_sweeper(IncrementalSweeper::create(this))
    , m_deferralDepth(0)
{
    m_storageSpace.init();
}

Heap::~Heap()
{
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->entryScope);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    collectIfNecessaryOrDefer();
}
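
// Usage sketch (hypothetical caller): an object owning a large out-of-line
// buffer would report that buffer's size so the collection trigger accounts
// for memory the marked space cannot see, e.g.:
//
//     vm.heap.reportExtraMemoryCost(backingStore->byteLength());
//
// Small costs are expected to be filtered out by the inline fast path
// (reportExtraMemoryCost in Heap.h); only unusually large ones reach this
// slow case.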

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.1 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}

void Heap::didAbandon(size_t bytes)
{
    if (m_fullActivityCallback) {
        m_fullActivityCallback->didAllocate(
            m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    }
    m_bytesAbandonedSinceLastFullCollect += bytes;
}
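
// For example, if the last collection left a 50 MB heap, an abandoned object
// graph is charged as 0.1 * 50 MB = 5 MB of pretend allocation, which both
// notifies the full-collection timer and hastens the next collection.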

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
{
    if (m_arrayBuffers.addReference(cell, buffer)) {
        collectIfNecessaryOrDefer();
        didAllocate(buffer->gcSizeEstimateInBytes());
    }
}

void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    GCPHASE(FinalizeUnconditionalFinalizers);
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::willStartIterating()
{
    m_objectSpace.willStartIterating();
}

void Heap::didFinishIterating()
{
    m_objectSpace.didFinishIterating();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        registerRoots[i]->setMarked();
        roots.add(registerRoots[i]);
    }
}

void Heap::markRoots(double gcStartTime)
{
    SamplingRegion samplingRegion("Garbage Collection: Marking");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(GGC)
    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
    m_slotVisitor.markStack().fillVector(rememberedSet);
#else
    Vector<const JSCell*> rememberedSet;
#endif

    if (m_operationInProgress == EdenCollection)
        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
    else
        m_codeBlocks.clearMarksForFullCollection();

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    void* dummy;
    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
    gatherStackRoots(conservativeRoots, &dummy);
    gatherJSStackRoots(conservativeRoots);
    gatherScratchBufferRoots(conservativeRoots);

    sanitizeStackForVM(m_vm);

    clearLivenessData();

    m_sharedData.didStartMarking();
    m_slotVisitor.didStartMarking();
    HeapRootVisitor heapRootVisitor(m_slotVisitor);

    {
        ParallelModeEnabler enabler(m_slotVisitor);

        visitExternalRememberedSet();
        visitSmallStrings();
        visitConservativeRoots(conservativeRoots);
        visitProtectedObjects(heapRootVisitor);
        visitTempSortVectors(heapRootVisitor);
        visitArgumentBuffers(heapRootVisitor);
        visitException(heapRootVisitor);
        visitStrongHandles(heapRootVisitor);
        visitHandleStack(heapRootVisitor);
        traceCodeBlocksAndJITStubRoutines();
        converge();
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    visitWeakHandles(heapRootVisitor);

    clearRememberedSet(rememberedSet);
    m_sharedData.didFinishMarking();
    updateObjectCounts(gcStartTime);
    resetVisitors();
}

void Heap::copyBackingStores()
{
    if (m_operationInProgress == EdenCollection)
        m_storageSpace.startedCopying<EdenCollection>();
    else {
        ASSERT(m_operationInProgress == FullCollection);
        m_storageSpace.startedCopying<FullCollection>();
    }

    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}

void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy)
{
    GCPHASE(GatherStackRoots);
    m_jitStubRoutines.clearMarks();
    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy);
}

void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
#if !ENABLE(JIT)
    GCPHASE(GatherJSStackRoots);
    stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
#else
    UNUSED_PARAM(roots);
#endif
}

void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
#if ENABLE(DFG_JIT)
    GCPHASE(GatherScratchBufferRoots);
    m_vm->gatherConservativeRoots(roots);
#else
    UNUSED_PARAM(roots);
#endif
}

void Heap::clearLivenessData()
{
    GCPHASE(ClearLivenessData);
    m_objectSpace.clearNewlyAllocated();
    m_objectSpace.clearMarks();
}

void Heap::visitExternalRememberedSet()
{
#if JSC_OBJC_API_ENABLED
    scanExternalRememberedSet(*m_vm, m_slotVisitor);
#endif
}

void Heap::visitSmallStrings()
{
    GCPHASE(VisitSmallStrings);
    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Small strings:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitConservativeRoots(ConservativeRoots& roots)
{
    GCPHASE(VisitConservativeRoots);
    m_slotVisitor.append(roots);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Conservative Roots:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitCompilerWorklistWeakReferences()
{
#if ENABLE(DFG_JIT)
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("DFG Worklists:\n", m_slotVisitor);
#endif
}

void Heap::removeDeadCompilerWorklistEntries()
{
#if ENABLE(DFG_JIT)
    GCPHASE(FinalizeDFGWorklists);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->removeDeadPlans(*m_vm);
#endif
}

void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitProtectedObjects);

    for (auto& pair : m_protectedValues)
        heapRootVisitor.visit(&pair.key);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Protected Objects:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitTempSortVectors);
    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>*> VectorOfValueStringVectors;

    for (auto* vector : m_tempSortingVectors) {
        for (auto& valueStringPair : *vector) {
            if (valueStringPair.first)
                heapRootVisitor.visit(&valueStringPair.first);
        }
    }

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Temp Sort Vectors:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingArgumentBuffers);
    if (!m_markListSet || !m_markListSet->size())
        return;

    MarkedArgumentBuffer::markLists(visitor, *m_markListSet);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Argument Buffers:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitException(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingException);
    if (!m_vm->exception())
        return;

    visitor.visit(m_vm->addressOfException());

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Exceptions:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitStrongHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitStrongHandles);
    m_handleSet.visitStrongHandles(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Strong Handles:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitHandleStack(HeapRootVisitor& visitor)
{
    GCPHASE(VisitHandleStack);
    m_handleStack.visit(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Handle Stack:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::traceCodeBlocksAndJITStubRoutines()
{
    GCPHASE(TraceCodeBlocksAndJITStubRoutines);
    m_codeBlocks.traceMarked(m_slotVisitor);
    m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::converge()
{
#if ENABLE(PARALLEL_GC)
    GCPHASE(Convergence);
    m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
}

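// Note on the loop below: weak-handle visiting runs to a fixpoint. Visiting
// weak sets and compiler worklists can mark new objects, which in turn can
// keep further weak handles and code blocks alive, so we re-drain until the
// visitor's mark stack stays empty.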
void Heap::visitWeakHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitingLiveWeakHandles);
    while (true) {
        m_objectSpace.visitWeakSets(visitor);
        harvestWeakReferences();
        visitCompilerWorklistWeakReferences();
        m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
        if (m_slotVisitor.isEmpty())
            break;

        if (Options::logGC() == GCLogging::Verbose)
            dataLog("Live Weak Handles:\n", m_slotVisitor);

        {
            ParallelModeEnabler enabler(m_slotVisitor);
            m_slotVisitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
            m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
        }
    }
}

void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
{
#if ENABLE(GGC)
    GCPHASE(ClearRememberedSet);
    for (auto* cell : rememberedSet) {
        MarkedBlock::blockFor(cell)->clearRemembered(cell);
        const_cast<JSCell*>(cell)->setRemembered(false);
    }
#else
    UNUSED_PARAM(rememberedSet);
#endif
}

void Heap::updateObjectCounts(double gcStartTime)
{
    GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());

    if (Options::logGC() == GCLogging::Verbose) {
        size_t visitCount = m_slotVisitor.visitCount();
#if ENABLE(PARALLEL_GC)
        visitCount += m_sharedData.childVisitCount();
#endif
        dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
    }

    if (m_operationInProgress == EdenCollection) {
        m_totalBytesVisited += m_slotVisitor.bytesVisited();
        m_totalBytesCopied += m_slotVisitor.bytesCopied();
    } else {
        ASSERT(m_operationInProgress == FullCollection);
        m_totalBytesVisited = m_slotVisitor.bytesVisited();
        m_totalBytesCopied = m_slotVisitor.bytesCopied();
    }
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif
}

void Heap::resetVisitors()
{
    m_slotVisitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}

size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::extraSize()
{
    return m_extraMemoryUsage + m_arrayBuffers.size();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size() + extraSize();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
}

size_t Heap::sizeAfterCollect()
{
    // The result here may not agree with the normal Heap::size().
    // This is due to the fact that we only count live copied bytes
    // rather than all used (including dead) copied bytes, thus it's
    // always the case that m_totalBytesCopied <= m_storageSpace.size().
    ASSERT(m_totalBytesCopied <= m_storageSpace.size());
    return m_totalBytesVisited + m_totalBytesCopied + extraSize();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
}

void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->entryScope)
        return;

    // If we have things on any worklist, then don't delete code. This is kind of
    // a weird heuristic. It's definitely not safe to throw away code that is on
    // the worklist. But this change was made in a hurry so we just avoid throwing
    // away any code if there is any code on any worklist. I suspect that this
    // might not actually be too dumb: if there is code on worklists then that
    // means that we are running some hot JS code right now. Maybe causing
    // recompilations isn't a good idea.
#if ENABLE(DFG_JIT)
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            if (worklist->isActiveForVM(*vm()))
                return;
        }
    }
#endif // ENABLE(DFG_JIT)

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
    m_codeBlocks.clearMarksForFullCollection();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
}

void Heap::deleteAllUnlinkedFunctionCode()
{
    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
    }
}

void Heap::clearUnmarkedExecutables()
{
    GCPHASE(ClearUnmarkedExecutables);
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }
}

void Heap::deleteUnmarkedCompiledCode()
{
    GCPHASE(DeleteCodeBlocks);
    clearUnmarkedExecutables();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}

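// Sketch of how cells get here: with GGC enabled, the write barrier fires
// when a store creates a pointer from an old-generation cell to a new object;
// the owning cell is recorded below so the next Eden collection rescans it as
// a root. The remembered bit keeps the set duplicate-free.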
void Heap::addToRememberedSet(const JSCell* cell)
{
    ASSERT(cell);
    ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
    if (isRemembered(cell))
        return;
    MarkedBlock::blockFor(cell)->setRemembered(cell);
    const_cast<JSCell*>(cell)->setRemembered(true);
    m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(FullCollection);

    SamplingRegion samplingRegion("Garbage Collection: Sweeping");
    DelayedReleaseScope delayedReleaseScope(m_objectSpace);
    m_objectSpace.sweep();
    m_objectSpace.shrink();
}

static double minute = 60.0;

void Heap::collect(HeapOperation collectionType)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif

    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC: ");
        before = currentTimeMS();
    }

    SamplingRegion samplingRegion("Garbage Collection");

    RELEASE_ASSERT(!m_deferralDepth);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    suspendCompilerThreads();
    willStartCollection(collectionType);
    GCPHASE(Collect);

    double gcStartTime = WTF::monotonicallyIncreasingTime();

    deleteOldCode(gcStartTime);
    flushOldStructureIDTables();
    stopAllocation();
    flushWriteBarrierBuffer();

    markRoots(gcStartTime);

    JAVASCRIPTCORE_GC_MARKED();

    reapWeakHandles();
    sweepArrayBuffers();
    snapshotMarkedSpace();

    copyBackingStores();

    finalizeUnconditionalFinalizers();
    removeDeadCompilerWorklistEntries();
    deleteUnmarkedCompiledCode();
    deleteSourceProviderCaches();
    notifyIncrementalSweeper();
    rememberCurrentlyExecutingCodeBlocks();

    resetAllocators();
    updateAllocationLimits();
    didFinishCollection(gcStartTime);
    resumeCompilerThreads();

    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms]\n");
    }
}

void Heap::suspendCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(SuspendCompilerThreads);
    ASSERT(m_suspendedCompilerWorklists.isEmpty());
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            m_suspendedCompilerWorklists.append(worklist);
            worklist->suspendAllThreads();
        }
    }
#endif
}

void Heap::willStartCollection(HeapOperation collectionType)
{
    GCPHASE(StartingCollection);
    if (shouldDoFullCollection(collectionType)) {
        m_operationInProgress = FullCollection;
        m_slotVisitor.clearMarkStack();
        m_shouldDoFullCollection = false;
        if (Options::logGC())
            dataLog("FullCollection, ");
    } else {
        m_operationInProgress = EdenCollection;
        if (Options::logGC())
            dataLog("EdenCollection, ");
    }
    if (m_operationInProgress == FullCollection) {
        m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
        m_extraMemoryUsage = 0;

        if (m_fullActivityCallback)
            m_fullActivityCallback->willCollect();
    } else {
        ASSERT(m_operationInProgress == EdenCollection);
        m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
    }

    if (m_edenActivityCallback)
        m_edenActivityCallback->willCollect();
}

void Heap::deleteOldCode(double gcStartTime)
{
    if (m_operationInProgress == EdenCollection)
        return;

    GCPHASE(DeleteOldCode);
    if (gcStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }
}

void Heap::flushOldStructureIDTables()
{
    GCPHASE(FlushOldStructureIDTables);
    m_structureIDTable.flushOldTables();
}

void Heap::flushWriteBarrierBuffer()
{
    GCPHASE(FlushWriteBarrierBuffer);
    if (m_operationInProgress == EdenCollection) {
        m_writeBarrierBuffer.flush(*this);
        return;
    }
    m_writeBarrierBuffer.reset();
}

void Heap::stopAllocation()
{
    GCPHASE(StopAllocation);
    m_objectSpace.stopAllocating();
    if (m_operationInProgress == FullCollection)
        m_storageSpace.didStartFullCollection();
}

void Heap::reapWeakHandles()
{
    GCPHASE(ReapingWeakHandles);
    m_objectSpace.reapWeakSets();
}

void Heap::sweepArrayBuffers()
{
    GCPHASE(SweepingArrayBuffers);
    m_arrayBuffers.sweep();
}

struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
    MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
        : m_index(0)
        , m_blocks(blocks)
    {
    }

    void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }

    size_t m_index;
    Vector<MarkedBlock*>& m_blocks;
};

void Heap::snapshotMarkedSpace()
{
    GCPHASE(SnapshotMarkedSpace);
    if (m_operationInProgress != FullCollection)
        return;

    m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
    MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
    m_objectSpace.forEachBlock(functor);
}

void Heap::deleteSourceProviderCaches()
{
    GCPHASE(DeleteSourceProviderCaches);
    m_vm->clearSourceProviderCaches();
}

void Heap::notifyIncrementalSweeper()
{
    GCPHASE(NotifyIncrementalSweeper);
    if (m_operationInProgress != FullCollection)
        return;
    m_sweeper->startSweeping(m_blockSnapshot);
}

void Heap::rememberCurrentlyExecutingCodeBlocks()
{
    GCPHASE(RememberCurrentlyExecutingCodeBlocks);
    m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
}

void Heap::resetAllocators()
{
    GCPHASE(ResetAllocators);
    m_objectSpace.resetAllocators();
}

void Heap::updateAllocationLimits()
{
    GCPHASE(UpdateAllocationLimits);
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    if (m_operationInProgress == FullCollection) {
        // To avoid pathological GC churn in very small and very large heaps, we set
        // the new allocation limit based on the current size of the heap, with a
        // fixed minimum.
        m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastFullCollect = currentHeapSize;
        m_bytesAbandonedSinceLastFullCollect = 0;
    } else {
        ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastEdenCollect = currentHeapSize;
        double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
        double minEdenToOldGenerationRatio = 1.0 / 3.0;
        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
            m_shouldDoFullCollection = true;
        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (m_fullActivityCallback) {
            ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
            m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
        }
    }

    m_sizeAfterLastCollect = currentHeapSize;
    m_bytesAllocatedThisCycle = 0;

    if (Options::logGC())
        dataLog(currentHeapSize / 1024, " kb, ");
}
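
// Worked example (full collection, LargeHeap, 1 GB RAM): if sizeAfterCollect()
// is 100 MB, proportionalHeapSize() doubles it, so m_maxHeapSize becomes
// 200 MB and m_maxEdenSize 100 MB; roughly that much new allocation must
// accumulate before the next collection is triggered.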

void Heap::didFinishCollection(double gcStartTime)
{
    GCPHASE(FinishingCollection);
    double gcEndTime = WTF::monotonicallyIncreasingTime();
    if (m_operationInProgress == FullCollection)
        m_lastFullGCLength = gcEndTime - gcStartTime;
    else
        m_lastEdenGCLength = gcEndTime - gcStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);

    if (Options::logGC() == GCLogging::Verbose)
        GCLogging::dumpObjectGraph(this);
}

void Heap::resumeCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(ResumeCompilerThreads);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->resumeAllThreads();
    m_suspendedCompilerWorklists.clear();
#endif
}

void Heap::markDeadObjects()
{
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
}

void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
{
    m_fullActivityCallback = activityCallback;
}

void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
{
    m_edenActivityCallback = activityCallback;
}

GCActivityCallback* Heap::fullActivityCallback()
{
    return m_fullActivityCallback.get();
}

GCActivityCallback* Heap::edenActivityCallback()
{
    return m_edenActivityCallback.get();
}

void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
{
    m_sweeper = sweeper;
}

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}

void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
    if (m_fullActivityCallback)
        m_fullActivityCallback->setEnabled(enable);
    if (m_edenActivityCallback)
        m_edenActivityCallback->setEnabled(enable);
}

void Heap::didAllocate(size_t bytes)
{
    if (m_edenActivityCallback)
        m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    m_bytesAllocatedThisCycle += bytes;
}

bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}

void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}

class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = zombifiedBits;
    }
};

void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        DelayedReleaseScope delayedReleaseScope(m_objectSpace);
        m_objectSpace.zombifySweep();
    }
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
}

void Heap::flushWriteBarrierBuffer(JSCell* cell)
{
#if ENABLE(GGC)
    m_writeBarrierBuffer.flush(*this);
    m_writeBarrierBuffer.add(cell);
#else
    UNUSED_PARAM(cell);
#endif
}
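
// The flush-then-add order above is deliberate (a sketch of the intent): this
// overload is reached when the fixed-capacity buffer (256 slots, sized in the
// Heap constructor) cannot accept another cell, so flushing first guarantees
// room for the new entry.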

bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
{
#if ENABLE(GGC)
    if (Options::alwaysDoFullCollection())
        return true;

    switch (requestedCollectionType) {
    case EdenCollection:
        return false;
    case FullCollection:
        return true;
    case AnyCollection:
        return m_shouldDoFullCollection;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return false;
#else
    UNUSED_PARAM(requestedCollectionType);
    return true;
#endif
}

} // namespace JSC