1 /*
2 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "ConservativeRoots.h"
26 #include "CopiedSpace.h"
27 #include "CopiedSpaceInlines.h"
28 #include "CopyVisitorInlines.h"
29 #include "DFGWorklist.h"
30 #include "EdenGCActivityCallback.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "HeapIterationScope.h"
35 #include "HeapRootVisitor.h"
36 #include "HeapStatistics.h"
37 #include "HeapVerifier.h"
38 #include "IncrementalSweeper.h"
39 #include "Interpreter.h"
40 #include "JSGlobalObject.h"
41 #include "JSLock.h"
42 #include "JSONObject.h"
43 #include "JSCInlines.h"
44 #include "JSVirtualMachineInternal.h"
45 #include "RecursiveAllocationScope.h"
46 #include "Tracing.h"
47 #include "TypeProfilerLog.h"
48 #include "UnlinkedCodeBlock.h"
49 #include "VM.h"
50 #include "WeakSetInlines.h"
51 #include <algorithm>
52 #include <wtf/RAMSize.h>
53 #include <wtf/CurrentTime.h>
54 #include <wtf/ProcessID.h>
55
56 using namespace std;
57 using namespace JSC;
58
59 namespace JSC {
60
61 namespace {
62
63 static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
64 static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
65
66 #define ENABLE_GC_LOGGING 0
67
68 #if ENABLE(GC_LOGGING)
69 #if COMPILER(CLANG)
70 #define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
71 _Pragma("clang diagnostic push") \
72 _Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
73 _Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
74 static type name arguments; \
75 _Pragma("clang diagnostic pop")
76 #else
77 #define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
78 static type name arguments;
79 #endif // COMPILER(CLANG)
80
81 struct GCTimer {
82 GCTimer(const char* name)
83 : name(name)
84 {
85 }
86 ~GCTimer()
87 {
88 logData(allCollectionData, "(All)");
89 logData(edenCollectionData, "(Eden)");
90 logData(fullCollectionData, "(Full)");
91 }
92
93 struct TimeRecord {
94 TimeRecord()
95 : time(0)
96 , min(std::numeric_limits<double>::infinity())
97 , max(0)
98 , count(0)
99 {
100 }
101
102 double time;
103 double min;
104 double max;
105 size_t count;
106 };
107
108 void logData(const TimeRecord& data, const char* extra)
109 {
110 dataLogF("[%d] %s (Parent: %s) %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
111 getCurrentProcessID(),
112 name,
113 parent ? parent->name : "nullptr",
114 extra,
115 data.time * 1000,
116 data.time * 1000 / data.count,
117 data.min * 1000,
118 data.max * 1000,
119 data.count);
120 }
121
122 void updateData(TimeRecord& data, double duration)
123 {
124 if (duration < data.min)
125 data.min = duration;
126 if (duration > data.max)
127 data.max = duration;
128 data.count++;
129 data.time += duration;
130 }
131
132 void didFinishPhase(HeapOperation collectionType, double duration)
133 {
134 TimeRecord& data = collectionType == EdenCollection ? edenCollectionData : fullCollectionData;
135 updateData(data, duration);
136 updateData(allCollectionData, duration);
137 }
138
139 static GCTimer* s_currentGlobalTimer;
140
141 TimeRecord allCollectionData;
142 TimeRecord fullCollectionData;
143 TimeRecord edenCollectionData;
144 const char* name;
145 GCTimer* parent { nullptr };
146 };
147
148 GCTimer* GCTimer::s_currentGlobalTimer = nullptr;
149
150 struct GCTimerScope {
151 GCTimerScope(GCTimer& timer, HeapOperation collectionType)
152 : timer(timer)
153 , start(WTF::monotonicallyIncreasingTime())
154 , collectionType(collectionType)
155 {
156 timer.parent = GCTimer::s_currentGlobalTimer;
157 GCTimer::s_currentGlobalTimer = &timer;
158 }
159 ~GCTimerScope()
160 {
161 double delta = WTF::monotonicallyIncreasingTime() - start;
162 timer.didFinishPhase(collectionType, delta);
163 GCTimer::s_currentGlobalTimer = timer.parent;
164 }
165 GCTimer& timer;
166 double start;
167 HeapOperation collectionType;
168 };
169
170 struct GCCounter {
171 GCCounter(const char* name)
172 : name(name)
173 , count(0)
174 , total(0)
175 , min(10000000)
176 , max(0)
177 {
178 }
179
180 void add(size_t amount)
181 {
182 count++;
183 total += amount;
184 if (amount < min)
185 min = amount;
186 if (amount > max)
187 max = amount;
188 }
189 ~GCCounter()
190 {
191 dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), name, total, total / count, min, max);
192 }
193 const char* name;
194 size_t count;
195 size_t total;
196 size_t min;
197 size_t max;
198 };
199
200 #define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(name##Timer, m_operationInProgress)
201 #define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.add(value); } while (false)
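// For example, GCPHASE(MarkRoots) expands to a function-local static GCTimer named
// MarkRootsTimer plus a stack-allocated GCTimerScope (MarkRootsTimerScope) that times the
// enclosing block and charges the elapsed time to the current collection type through
// m_operationInProgress. The accumulated per-phase data is printed from the GCTimer
// destructors when the process exits.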
202
203 #else
204
205 #define GCPHASE(name) do { } while (false)
206 #define GCCOUNTER(name, value) do { } while (false)
207 #endif
208
209 static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
210 {
211 if (heapType == LargeHeap)
212 return min(largeHeapSize, ramSize / 4);
213 return smallHeapSize;
214 }
215
216 static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
217 {
218 // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
219 if (heapSize < ramSize / 4)
220 return 2 * heapSize;
221 if (heapSize < ramSize / 2)
222 return 1.5 * heapSize;
223 return 1.25 * heapSize;
224 }
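// Rough example of the growth policy above, assuming 1GB of RAM: a heap that measured
// 200MB after collection (below ramSize / 4 = 256MB) may grow to 2 * 200MB = 400MB before
// the next full collection; at 400MB (between ramSize / 4 and ramSize / 2) the allowance
// grows only 1.5x, to 600MB; beyond ramSize / 2 growth is capped at 1.25x, which keeps the
// heap from crowding out the DOM, rendering, and networking allocations mentioned above.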
225
226 static inline bool isValidSharedInstanceThreadState(VM* vm)
227 {
228 return vm->currentThreadIsHoldingAPILock();
229 }
230
231 static inline bool isValidThreadState(VM* vm)
232 {
233 if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
234 return false;
235
236 if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
237 return false;
238
239 return true;
240 }
241
242 struct MarkObject : public MarkedBlock::VoidFunctor {
243 inline void visit(JSCell* cell)
244 {
245 if (cell->isZapped())
246 return;
247 Heap::heap(cell)->setMarked(cell);
248 }
249 IterationStatus operator()(JSCell* cell)
250 {
251 visit(cell);
252 return IterationStatus::Continue;
253 }
254 };
255
256 struct Count : public MarkedBlock::CountFunctor {
257 void operator()(JSCell*) { count(1); }
258 };
259
260 struct CountIfGlobalObject : MarkedBlock::CountFunctor {
261 inline void visit(JSCell* cell)
262 {
263 if (!cell->isObject())
264 return;
265 if (!asObject(cell)->isGlobalObject())
266 return;
267 count(1);
268 }
269 IterationStatus operator()(JSCell* cell)
270 {
271 visit(cell);
272 return IterationStatus::Continue;
273 }
274 };
275
276 class RecordType {
277 public:
278 typedef std::unique_ptr<TypeCountSet> ReturnType;
279
280 RecordType();
281 IterationStatus operator()(JSCell*);
282 ReturnType returnValue();
283
284 private:
285 const char* typeName(JSCell*);
286 std::unique_ptr<TypeCountSet> m_typeCountSet;
287 };
288
289 inline RecordType::RecordType()
290 : m_typeCountSet(std::make_unique<TypeCountSet>())
291 {
292 }
293
294 inline const char* RecordType::typeName(JSCell* cell)
295 {
296 const ClassInfo* info = cell->classInfo();
297 if (!info || !info->className)
298 return "[unknown]";
299 return info->className;
300 }
301
302 inline IterationStatus RecordType::operator()(JSCell* cell)
303 {
304 m_typeCountSet->add(typeName(cell));
305 return IterationStatus::Continue;
306 }
307
308 inline std::unique_ptr<TypeCountSet> RecordType::returnValue()
309 {
310 return WTF::move(m_typeCountSet);
311 }
312
313 } // anonymous namespace
314
315 Heap::Heap(VM* vm, HeapType heapType)
316 : m_heapType(heapType)
317 , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
318 , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
319 , m_sizeAfterLastCollect(0)
320 , m_sizeAfterLastFullCollect(0)
321 , m_sizeBeforeLastFullCollect(0)
322 , m_sizeAfterLastEdenCollect(0)
323 , m_sizeBeforeLastEdenCollect(0)
324 , m_bytesAllocatedThisCycle(0)
325 , m_bytesAbandonedSinceLastFullCollect(0)
326 , m_maxEdenSize(m_minBytesPerCycle)
327 , m_maxHeapSize(m_minBytesPerCycle)
328 , m_shouldDoFullCollection(false)
329 , m_totalBytesVisited(0)
330 , m_totalBytesCopied(0)
331 , m_operationInProgress(NoOperation)
332 , m_objectSpace(this)
333 , m_storageSpace(this)
334 , m_extraMemorySize(0)
335 , m_deprecatedExtraMemorySize(0)
336 , m_machineThreads(this)
337 , m_sharedData(vm)
338 , m_slotVisitor(m_sharedData)
339 , m_copyVisitor(m_sharedData)
340 , m_handleSet(vm)
341 , m_isSafeToCollect(false)
342 , m_writeBarrierBuffer(256)
343 , m_vm(vm)
344 // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
345 // schedule the timer if we've never done a collection.
346 , m_lastFullGCLength(0.01)
347 , m_lastEdenGCLength(0.01)
348 , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
349 , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
350 #if ENABLE(GGC)
351 , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
352 #else
353 , m_edenActivityCallback(m_fullActivityCallback)
354 #endif
355 #if USE(CF)
356 , m_sweeper(std::make_unique<IncrementalSweeper>(this, CFRunLoopGetCurrent()))
357 #else
358 , m_sweeper(std::make_unique<IncrementalSweeper>(this->vm()))
359 #endif
360 , m_deferralDepth(0)
361 #if USE(CF)
362 , m_delayedReleaseRecursionCount(0)
363 #endif
364 {
365 m_storageSpace.init();
366 if (Options::verifyHeap())
367 m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
368 }
369
370 Heap::~Heap()
371 {
372 for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
373 WeakBlock::destroy(block);
374 }
375
376 bool Heap::isPagedOut(double deadline)
377 {
378 return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
379 }
380
381 // The VM is being destroyed and the collector will never run again.
382 // Run all pending finalizers now because we won't get another chance.
383 void Heap::lastChanceToFinalize()
384 {
385 RELEASE_ASSERT(!m_vm->entryScope);
386 RELEASE_ASSERT(m_operationInProgress == NoOperation);
387
388 m_objectSpace.lastChanceToFinalize();
389 releaseDelayedReleasedObjects();
390
391 sweepAllLogicallyEmptyWeakBlocks();
392 }
393
394 void Heap::releaseDelayedReleasedObjects()
395 {
396 #if USE(CF)
397     // We need to guard against the case where releasing an object creates more objects, because
398     // the release can call back into JS. When those JS call(s) exit and all locks are dropped, we
399     // end up back here and could try to recursively release objects. We guard against that with a
400     // recursive entry count: only the initial call releases objects; recursive calls simply return
401     // and let the initial call take care of any objects created during release.
402     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects,
403     // and use a temporary Vector for the actual releasing.
404 if (!m_delayedReleaseRecursionCount++) {
405 while (!m_delayedReleaseObjects.isEmpty()) {
406 ASSERT(m_vm->currentThreadIsHoldingAPILock());
407
408 Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTF::move(m_delayedReleaseObjects);
409
410 {
411 // We need to drop locks before calling out to arbitrary code.
412 JSLock::DropAllLocks dropAllLocks(m_vm);
413
414 objectsToRelease.clear();
415 }
416 }
417 }
418 m_delayedReleaseRecursionCount--;
419 #endif
420 }
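// Sketch of the re-entrancy that the recursion count above guards against (call pattern
// shown for illustration only):
//
//   releaseDelayedReleasedObjects()          // count 0 -> 1: this call does the releasing
//     -> releasing a CF object calls back into JS
//       -> that JS returns and the API locks are dropped
//         -> releaseDelayedReleasedObjects() // count 1 -> 2: returns immediately
//   ...and the outer call keeps looping until m_delayedReleaseObjects stays empty.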
421
422 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
423 {
424 didAllocate(size);
425 collectIfNecessaryOrDefer();
426 }
427
428 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
429 {
430 m_deprecatedExtraMemorySize += size;
431 reportExtraMemoryAllocatedSlowCase(size);
432 }
433
434 void Heap::reportAbandonedObjectGraph()
435 {
436 // Our clients don't know exactly how much memory they
437 // are abandoning so we just guess for them.
438 double abandonedBytes = 0.1 * m_sizeAfterLastCollect;
439
440 // We want to accelerate the next collection. Because memory has just
441 // been abandoned, the next collection has the potential to
442 // be more profitable. Since allocation is the trigger for collection,
443 // we hasten the next collection by pretending that we've allocated more memory.
444 didAbandon(abandonedBytes);
445 }
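// Example with the 10% guess above: if the last collection left 50MB live, we report 5MB of
// abandoned memory; didAbandon() then adds it to m_bytesAbandonedSinceLastFullCollect and
// notifies the full-GC activity callback, which brings the next collection forward.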
446
447 void Heap::didAbandon(size_t bytes)
448 {
449 if (m_fullActivityCallback) {
450 m_fullActivityCallback->didAllocate(
451 m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
452 }
453 m_bytesAbandonedSinceLastFullCollect += bytes;
454 }
455
456 void Heap::protect(JSValue k)
457 {
458 ASSERT(k);
459 ASSERT(m_vm->currentThreadIsHoldingAPILock());
460
461 if (!k.isCell())
462 return;
463
464 m_protectedValues.add(k.asCell());
465 }
466
467 bool Heap::unprotect(JSValue k)
468 {
469 ASSERT(k);
470 ASSERT(m_vm->currentThreadIsHoldingAPILock());
471
472 if (!k.isCell())
473 return false;
474
475 return m_protectedValues.remove(k.asCell());
476 }
477
478 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
479 {
480 if (m_arrayBuffers.addReference(cell, buffer)) {
481 collectIfNecessaryOrDefer();
482 didAllocate(buffer->gcSizeEstimateInBytes());
483 }
484 }
485
486 void Heap::harvestWeakReferences()
487 {
488 m_slotVisitor.harvestWeakReferences();
489 }
490
491 void Heap::finalizeUnconditionalFinalizers()
492 {
493 GCPHASE(FinalizeUnconditionalFinalizers);
494 m_slotVisitor.finalizeUnconditionalFinalizers();
495 }
496
497 inline JSStack& Heap::stack()
498 {
499 return m_vm->interpreter->stack();
500 }
501
502 void Heap::willStartIterating()
503 {
504 m_objectSpace.willStartIterating();
505 }
506
507 void Heap::didFinishIterating()
508 {
509 m_objectSpace.didFinishIterating();
510 }
511
512 void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
513 {
514 ASSERT(isValidThreadState(m_vm));
515 ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
516 stack().gatherConservativeRoots(stackRoots);
517 size_t stackRootCount = stackRoots.size();
518 JSCell** registerRoots = stackRoots.roots();
519 for (size_t i = 0; i < stackRootCount; i++) {
520 setMarked(registerRoots[i]);
521 registerRoots[i]->setMarked();
522 roots.add(registerRoots[i]);
523 }
524 }
525
526 void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
527 {
528 SamplingRegion samplingRegion("Garbage Collection: Marking");
529
530 GCPHASE(MarkRoots);
531 ASSERT(isValidThreadState(m_vm));
532
533 #if ENABLE(GGC)
534 Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
535 m_slotVisitor.markStack().fillVector(rememberedSet);
536 #else
537 Vector<const JSCell*> rememberedSet;
538 #endif
539
540 if (m_operationInProgress == EdenCollection)
541 m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
542 else
543 m_codeBlocks.clearMarksForFullCollection();
544
545 // We gather conservative roots before clearing mark bits because conservative
546 // gathering uses the mark bits to determine whether a reference is valid.
547 ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
548 gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
549 gatherJSStackRoots(conservativeRoots);
550 gatherScratchBufferRoots(conservativeRoots);
551
552 clearLivenessData();
553
554 m_sharedData.didStartMarking();
555 m_slotVisitor.didStartMarking();
556 HeapRootVisitor heapRootVisitor(m_slotVisitor);
557
558 {
559 ParallelModeEnabler enabler(m_slotVisitor);
560
561 visitExternalRememberedSet();
562 visitSmallStrings();
563 visitConservativeRoots(conservativeRoots);
564 visitProtectedObjects(heapRootVisitor);
565 visitArgumentBuffers(heapRootVisitor);
566 visitException(heapRootVisitor);
567 visitStrongHandles(heapRootVisitor);
568 visitHandleStack(heapRootVisitor);
569 traceCodeBlocksAndJITStubRoutines();
570 converge();
571 }
572
573 // Weak references must be marked last because their liveness depends on
574 // the liveness of the rest of the object graph.
575 visitWeakHandles(heapRootVisitor);
576
577 clearRememberedSet(rememberedSet);
578 m_sharedData.didFinishMarking();
579 updateObjectCounts(gcStartTime);
580 resetVisitors();
581 }
582
583 void Heap::copyBackingStores()
584 {
585 GCPHASE(CopyBackingStores);
586 if (m_operationInProgress == EdenCollection)
587 m_storageSpace.startedCopying<EdenCollection>();
588 else {
589 ASSERT(m_operationInProgress == FullCollection);
590 m_storageSpace.startedCopying<FullCollection>();
591 }
592
593 if (m_storageSpace.shouldDoCopyPhase()) {
594 m_sharedData.didStartCopying();
595 m_copyVisitor.startCopying();
596 m_copyVisitor.copyFromShared();
597 m_copyVisitor.doneCopying();
598 // We need to wait for everybody to finish and return their CopiedBlocks
599 // before signaling that the phase is complete.
600 m_storageSpace.doneCopying();
601 m_sharedData.didFinishCopying();
602 } else
603 m_storageSpace.doneCopying();
604 }
605
606 void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
607 {
608 GCPHASE(GatherStackRoots);
609 m_jitStubRoutines.clearMarks();
610 m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
611 }
612
613 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
614 {
615 #if !ENABLE(JIT)
616 GCPHASE(GatherJSStackRoots);
617 stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
618 #else
619 UNUSED_PARAM(roots);
620 #endif
621 }
622
623 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
624 {
625 #if ENABLE(DFG_JIT)
626 GCPHASE(GatherScratchBufferRoots);
627 m_vm->gatherConservativeRoots(roots);
628 #else
629 UNUSED_PARAM(roots);
630 #endif
631 }
632
633 void Heap::clearLivenessData()
634 {
635 GCPHASE(ClearLivenessData);
636 m_objectSpace.clearNewlyAllocated();
637 m_objectSpace.clearMarks();
638 }
639
640 void Heap::visitExternalRememberedSet()
641 {
642 #if JSC_OBJC_API_ENABLED
643 scanExternalRememberedSet(*m_vm, m_slotVisitor);
644 #endif
645 }
646
647 void Heap::visitSmallStrings()
648 {
649 GCPHASE(VisitSmallStrings);
650 if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
651 return;
652
653 m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
654 if (Options::logGC() == GCLogging::Verbose)
655 dataLog("Small strings:\n", m_slotVisitor);
656 m_slotVisitor.donateAndDrain();
657 }
658
659 void Heap::visitConservativeRoots(ConservativeRoots& roots)
660 {
661 GCPHASE(VisitConservativeRoots);
662 m_slotVisitor.append(roots);
663
664 if (Options::logGC() == GCLogging::Verbose)
665 dataLog("Conservative Roots:\n", m_slotVisitor);
666
667 m_slotVisitor.donateAndDrain();
668 }
669
670 void Heap::visitCompilerWorklistWeakReferences()
671 {
672 #if ENABLE(DFG_JIT)
673 for (auto worklist : m_suspendedCompilerWorklists)
674 worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);
675
676 if (Options::logGC() == GCLogging::Verbose)
677 dataLog("DFG Worklists:\n", m_slotVisitor);
678 #endif
679 }
680
681 void Heap::removeDeadCompilerWorklistEntries()
682 {
683 #if ENABLE(DFG_JIT)
684 GCPHASE(FinalizeDFGWorklists);
685 for (auto worklist : m_suspendedCompilerWorklists)
686 worklist->removeDeadPlans(*m_vm);
687 #endif
688 }
689
690 void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
691 {
692 GCPHASE(VisitProtectedObjects);
693
694 for (auto& pair : m_protectedValues)
695 heapRootVisitor.visit(&pair.key);
696
697 if (Options::logGC() == GCLogging::Verbose)
698 dataLog("Protected Objects:\n", m_slotVisitor);
699
700 m_slotVisitor.donateAndDrain();
701 }
702
703 void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
704 {
705 GCPHASE(MarkingArgumentBuffers);
706 if (!m_markListSet || !m_markListSet->size())
707 return;
708
709 MarkedArgumentBuffer::markLists(visitor, *m_markListSet);
710
711 if (Options::logGC() == GCLogging::Verbose)
712 dataLog("Argument Buffers:\n", m_slotVisitor);
713
714 m_slotVisitor.donateAndDrain();
715 }
716
717 void Heap::visitException(HeapRootVisitor& visitor)
718 {
719 GCPHASE(MarkingException);
720 if (!m_vm->exception() && !m_vm->lastException())
721 return;
722
723 visitor.visit(m_vm->addressOfException());
724 visitor.visit(m_vm->addressOfLastException());
725
726 if (Options::logGC() == GCLogging::Verbose)
727 dataLog("Exceptions:\n", m_slotVisitor);
728
729 m_slotVisitor.donateAndDrain();
730 }
731
732 void Heap::visitStrongHandles(HeapRootVisitor& visitor)
733 {
734 GCPHASE(VisitStrongHandles);
735 m_handleSet.visitStrongHandles(visitor);
736
737 if (Options::logGC() == GCLogging::Verbose)
738 dataLog("Strong Handles:\n", m_slotVisitor);
739
740 m_slotVisitor.donateAndDrain();
741 }
742
743 void Heap::visitHandleStack(HeapRootVisitor& visitor)
744 {
745 GCPHASE(VisitHandleStack);
746 m_handleStack.visit(visitor);
747
748 if (Options::logGC() == GCLogging::Verbose)
749 dataLog("Handle Stack:\n", m_slotVisitor);
750
751 m_slotVisitor.donateAndDrain();
752 }
753
754 void Heap::traceCodeBlocksAndJITStubRoutines()
755 {
756 GCPHASE(TraceCodeBlocksAndJITStubRoutines);
757 m_codeBlocks.traceMarked(m_slotVisitor);
758 m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);
759
760 if (Options::logGC() == GCLogging::Verbose)
761 dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);
762
763 m_slotVisitor.donateAndDrain();
764 }
765
766 void Heap::converge()
767 {
768 #if ENABLE(PARALLEL_GC)
769 GCPHASE(Convergence);
770 m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
771 #endif
772 }
773
774 void Heap::visitWeakHandles(HeapRootVisitor& visitor)
775 {
776 GCPHASE(VisitingLiveWeakHandles);
777 while (true) {
778 m_objectSpace.visitWeakSets(visitor);
779 harvestWeakReferences();
780 visitCompilerWorklistWeakReferences();
781 m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
782 if (m_slotVisitor.isEmpty())
783 break;
784
785 if (Options::logGC() == GCLogging::Verbose)
786 dataLog("Live Weak Handles:\n", m_slotVisitor);
787
788 {
789 ParallelModeEnabler enabler(m_slotVisitor);
790 m_slotVisitor.donateAndDrain();
791 #if ENABLE(PARALLEL_GC)
792 m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
793 #endif
794 }
795 }
796 }
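// The loop above is a fixed point: visiting weak sets and harvesting weak references can mark
// objects that were not previously reached, which in turn can make more weak handles (and more
// "executing" code blocks) live, so we repeat until the visitor's mark stack stays empty.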
797
798 void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
799 {
800 #if ENABLE(GGC)
801 GCPHASE(ClearRememberedSet);
802 for (auto* cell : rememberedSet)
803 const_cast<JSCell*>(cell)->setRemembered(false);
804 #else
805 UNUSED_PARAM(rememberedSet);
806 #endif
807 }
808
809 void Heap::updateObjectCounts(double gcStartTime)
810 {
811 GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());
812
813 if (Options::logGC() == GCLogging::Verbose) {
814 size_t visitCount = m_slotVisitor.visitCount();
815 #if ENABLE(PARALLEL_GC)
816 visitCount += m_sharedData.childVisitCount();
817 #endif
818 dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
819 }
820
821 size_t bytesRemovedFromOldSpaceDueToReallocation =
822 m_storageSpace.takeBytesRemovedFromOldSpaceDueToReallocation();
823
824 if (m_operationInProgress == FullCollection) {
825 m_totalBytesVisited = 0;
826 m_totalBytesCopied = 0;
827 } else
828 m_totalBytesCopied -= bytesRemovedFromOldSpaceDueToReallocation;
829
830 m_totalBytesVisited += m_slotVisitor.bytesVisited();
831 m_totalBytesCopied += m_slotVisitor.bytesCopied();
832 #if ENABLE(PARALLEL_GC)
833 m_totalBytesVisited += m_sharedData.childBytesVisited();
834 m_totalBytesCopied += m_sharedData.childBytesCopied();
835 #endif
836 }
837
838 void Heap::resetVisitors()
839 {
840 m_slotVisitor.reset();
841 #if ENABLE(PARALLEL_GC)
842 m_sharedData.resetChildren();
843 #endif
844 m_sharedData.reset();
845 }
846
847 size_t Heap::objectCount()
848 {
849 return m_objectSpace.objectCount();
850 }
851
852 size_t Heap::extraMemorySize()
853 {
854 return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
855 }
856
857 size_t Heap::size()
858 {
859 return m_objectSpace.size() + m_storageSpace.size() + extraMemorySize();
860 }
861
862 size_t Heap::capacity()
863 {
864 return m_objectSpace.capacity() + m_storageSpace.capacity() + extraMemorySize();
865 }
866
867 size_t Heap::sizeAfterCollect()
868 {
869     // The result here may not agree with the normal Heap::size().
870     // That is because we count only live copied bytes rather than all
871     // used (including dead) copied bytes; hence it is always the case
872     // that m_totalBytesCopied <= m_storageSpace.size().
873 ASSERT(m_totalBytesCopied <= m_storageSpace.size());
874 return m_totalBytesVisited + m_totalBytesCopied + extraMemorySize();
875 }
876
877 size_t Heap::protectedGlobalObjectCount()
878 {
879 return forEachProtectedCell<CountIfGlobalObject>();
880 }
881
882 size_t Heap::globalObjectCount()
883 {
884 HeapIterationScope iterationScope(*this);
885 return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
886 }
887
888 size_t Heap::protectedObjectCount()
889 {
890 return forEachProtectedCell<Count>();
891 }
892
893 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
894 {
895 return forEachProtectedCell<RecordType>();
896 }
897
898 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
899 {
900 HeapIterationScope iterationScope(*this);
901 return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
902 }
903
904 void Heap::deleteAllCompiledCode()
905 {
906 // If JavaScript is running, it's not safe to delete code, since we'll end
907 // up deleting code that is live on the stack.
908 if (m_vm->entryScope)
909 return;
910
911     // If we have things on any worklist, then don't delete code. This is a crude heuristic.
912     // It is definitely not safe to throw away code that is on the worklist, and rather than
913     // track exactly which code that is, this change (which was made in a hurry) conservatively
914     // avoids throwing away any code if there is any code on any worklist. That is probably
915     // not too dumb, either: if there is code on a worklist, then we are running some hot JS
916     // code right now, and causing recompilations while that is happening is unlikely to be
917     // a good idea.
918 #if ENABLE(DFG_JIT)
919 for (unsigned i = DFG::numberOfWorklists(); i--;) {
920 if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
921 if (worklist->isActiveForVM(*vm()))
922 return;
923 }
924 }
925 #endif // ENABLE(DFG_JIT)
926
927 for (ExecutableBase* current : m_compiledCode) {
928 if (!current->isFunctionExecutable())
929 continue;
930 static_cast<FunctionExecutable*>(current)->clearCode();
931 }
932
933 ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
934 m_codeBlocks.clearMarksForFullCollection();
935 m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
936 }
937
938 void Heap::deleteAllUnlinkedFunctionCode()
939 {
940 for (ExecutableBase* current : m_compiledCode) {
941 if (!current->isFunctionExecutable())
942 continue;
943 static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilation();
944 }
945 }
946
947 void Heap::clearUnmarkedExecutables()
948 {
949 GCPHASE(ClearUnmarkedExecutables);
950 for (unsigned i = m_compiledCode.size(); i--;) {
951 ExecutableBase* current = m_compiledCode[i];
952 if (isMarked(current))
953 continue;
954
955 // We do this because executable memory is limited on some platforms and because
956 // CodeBlock requires eager finalization.
957 ExecutableBase::clearCodeVirtual(current);
958 std::swap(m_compiledCode[i], m_compiledCode.last());
959 m_compiledCode.removeLast();
960 }
961 }
962
963 void Heap::deleteUnmarkedCompiledCode()
964 {
965 GCPHASE(DeleteCodeBlocks);
966 clearUnmarkedExecutables();
967 m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
968 m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
969 }
970
971 void Heap::addToRememberedSet(const JSCell* cell)
972 {
973 ASSERT(cell);
974 ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
975 if (isRemembered(cell))
976 return;
977 const_cast<JSCell*>(cell)->setRemembered(true);
978 m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
979 }
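// A remembered cell is an old-generation object that may point at eden objects; putting it
// back on the mark stack lets an EdenCollection treat it as a root instead of re-scanning the
// rest of the old generation. The callers (presumably the write barrier and the
// WriteBarrierBuffer flush) live outside this file.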
980
981 void Heap::collectAndSweep(HeapOperation collectionType)
982 {
983 if (!m_isSafeToCollect)
984 return;
985
986 collect(collectionType);
987
988 SamplingRegion samplingRegion("Garbage Collection: Sweeping");
989
990 DeferGCForAWhile deferGC(*this);
991 m_objectSpace.sweep();
992 m_objectSpace.shrink();
993
994 sweepAllLogicallyEmptyWeakBlocks();
995 }
996
997 static double minute = 60.0;
998
999 NEVER_INLINE void Heap::collect(HeapOperation collectionType)
1000 {
1001 void* stackTop;
1002 ALLOCATE_AND_GET_REGISTER_STATE(registers);
1003
1004 collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers);
1005
1006 sanitizeStackForVM(m_vm);
1007 }
1008
1009 NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
1010 {
1011 #if ENABLE(ALLOCATION_LOGGING)
1012 dataLogF("JSC GC starting collection.\n");
1013 #endif
1014
1015 double before = 0;
1016 if (Options::logGC()) {
1017 dataLog("[GC: ");
1018 before = currentTimeMS();
1019 }
1020
1021 SamplingRegion samplingRegion("Garbage Collection");
1022
1023 if (vm()->typeProfiler()) {
1024 DeferGCForAWhile awhile(*this);
1025 vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
1026 }
1027
1028 RELEASE_ASSERT(!m_deferralDepth);
1029 ASSERT(vm()->currentThreadIsHoldingAPILock());
1030 RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
1031 ASSERT(m_isSafeToCollect);
1032 JAVASCRIPTCORE_GC_BEGIN();
1033 RELEASE_ASSERT(m_operationInProgress == NoOperation);
1034
1035 suspendCompilerThreads();
1036 willStartCollection(collectionType);
1037 GCPHASE(Collect);
1038
1039 double gcStartTime = WTF::monotonicallyIncreasingTime();
1040 if (m_verifier) {
1041 // Verify that live objects from the last GC cycle haven't been corrupted by
1042 // mutators before we begin this new GC cycle.
1043 m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1044
1045 m_verifier->initializeGCCycle();
1046 m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
1047 }
1048
1049 deleteOldCode(gcStartTime);
1050 flushOldStructureIDTables();
1051 stopAllocation();
1052 flushWriteBarrierBuffer();
1053
1054 markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);
1055
1056 if (m_verifier) {
1057 m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
1058 m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1059 }
1060 JAVASCRIPTCORE_GC_MARKED();
1061
1062 if (vm()->typeProfiler())
1063 vm()->typeProfiler()->invalidateTypeSetCache();
1064
1065 reapWeakHandles();
1066 pruneStaleEntriesFromWeakGCMaps();
1067 sweepArrayBuffers();
1068 snapshotMarkedSpace();
1069
1070 copyBackingStores();
1071
1072 finalizeUnconditionalFinalizers();
1073 removeDeadCompilerWorklistEntries();
1074 deleteUnmarkedCompiledCode();
1075 deleteSourceProviderCaches();
1076 notifyIncrementalSweeper();
1077 rememberCurrentlyExecutingCodeBlocks();
1078
1079 resetAllocators();
1080 updateAllocationLimits();
1081 didFinishCollection(gcStartTime);
1082 resumeCompilerThreads();
1083
1084 if (m_verifier) {
1085 m_verifier->trimDeadObjects();
1086 m_verifier->verify(HeapVerifier::Phase::AfterGC);
1087 }
1088
1089 if (Options::logGC()) {
1090 double after = currentTimeMS();
1091 dataLog(after - before, " ms]\n");
1092 }
1093 }
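// In outline, a collection above runs as: suspend DFG compilation, choose Eden vs. Full,
// discard old code, stop allocators, mark from roots, copy backing stores, run finalizers and
// delete dead code, kick the incremental sweeper, recompute allocation limits, and resume
// compilation. Sweeping of marked space itself is mostly left to allocation, the incremental
// sweeper, or collectAndSweep().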
1094
1095 void Heap::suspendCompilerThreads()
1096 {
1097 #if ENABLE(DFG_JIT)
1098 GCPHASE(SuspendCompilerThreads);
1099 ASSERT(m_suspendedCompilerWorklists.isEmpty());
1100 for (unsigned i = DFG::numberOfWorklists(); i--;) {
1101 if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
1102 m_suspendedCompilerWorklists.append(worklist);
1103 worklist->suspendAllThreads();
1104 }
1105 }
1106 #endif
1107 }
1108
1109 void Heap::willStartCollection(HeapOperation collectionType)
1110 {
1111 GCPHASE(StartingCollection);
1112 if (shouldDoFullCollection(collectionType)) {
1113 m_operationInProgress = FullCollection;
1114 m_slotVisitor.clearMarkStack();
1115 m_shouldDoFullCollection = false;
1116 if (Options::logGC())
1117 dataLog("FullCollection, ");
1118 } else {
1119 m_operationInProgress = EdenCollection;
1120 if (Options::logGC())
1121 dataLog("EdenCollection, ");
1122 }
1123 if (m_operationInProgress == FullCollection) {
1124 m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1125 m_extraMemorySize = 0;
1126 m_deprecatedExtraMemorySize = 0;
1127
1128 if (m_fullActivityCallback)
1129 m_fullActivityCallback->willCollect();
1130 } else {
1131 ASSERT(m_operationInProgress == EdenCollection);
1132 m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1133 }
1134
1135 if (m_edenActivityCallback)
1136 m_edenActivityCallback->willCollect();
1137 }
1138
1139 void Heap::deleteOldCode(double gcStartTime)
1140 {
1141 if (m_operationInProgress == EdenCollection)
1142 return;
1143
1144 GCPHASE(DeleteOldCode);
1145 if (gcStartTime - m_lastCodeDiscardTime > minute) {
1146 deleteAllCompiledCode();
1147 m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
1148 }
1149 }
1150
1151 void Heap::flushOldStructureIDTables()
1152 {
1153 GCPHASE(FlushOldStructureIDTables);
1154 m_structureIDTable.flushOldTables();
1155 }
1156
1157 void Heap::flushWriteBarrierBuffer()
1158 {
1159 GCPHASE(FlushWriteBarrierBuffer);
1160 if (m_operationInProgress == EdenCollection) {
1161 m_writeBarrierBuffer.flush(*this);
1162 return;
1163 }
1164 m_writeBarrierBuffer.reset();
1165 }
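// An EdenCollection flushes the buffer so that the barriered cells end up treated like
// remembered cells and get re-visited; a FullCollection re-marks the whole heap anyway, so
// the buffer can simply be reset.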
1166
1167 void Heap::stopAllocation()
1168 {
1169 GCPHASE(StopAllocation);
1170 m_objectSpace.stopAllocating();
1171 if (m_operationInProgress == FullCollection)
1172 m_storageSpace.didStartFullCollection();
1173 }
1174
1175 void Heap::reapWeakHandles()
1176 {
1177 GCPHASE(ReapingWeakHandles);
1178 m_objectSpace.reapWeakSets();
1179 }
1180
1181 void Heap::pruneStaleEntriesFromWeakGCMaps()
1182 {
1183 GCPHASE(PruningStaleEntriesFromWeakGCMaps);
1184 if (m_operationInProgress != FullCollection)
1185 return;
1186 for (auto& pruneCallback : m_weakGCMaps.values())
1187 pruneCallback();
1188 }
1189
1190 void Heap::sweepArrayBuffers()
1191 {
1192 GCPHASE(SweepingArrayBuffers);
1193 m_arrayBuffers.sweep();
1194 }
1195
1196 struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
1197 MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
1198 : m_index(0)
1199 , m_blocks(blocks)
1200 {
1201 }
1202
1203 void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
1204
1205 size_t m_index;
1206 Vector<MarkedBlock*>& m_blocks;
1207 };
1208
1209 void Heap::snapshotMarkedSpace()
1210 {
1211 GCPHASE(SnapshotMarkedSpace);
1212
1213 if (m_operationInProgress == EdenCollection) {
1214 m_blockSnapshot.appendVector(m_objectSpace.blocksWithNewObjects());
1215 // Sort and deduplicate the block snapshot since we might be appending to an unfinished work list.
1216 std::sort(m_blockSnapshot.begin(), m_blockSnapshot.end());
1217 m_blockSnapshot.shrink(std::unique(m_blockSnapshot.begin(), m_blockSnapshot.end()) - m_blockSnapshot.begin());
1218 } else {
1219 m_blockSnapshot.resizeToFit(m_objectSpace.blocks().set().size());
1220 MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
1221 m_objectSpace.forEachBlock(functor);
1222 }
1223 }
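// The sort/unique/shrink sequence above is the usual erase-unique idiom: std::unique packs
// the duplicate MarkedBlock pointers at the tail and returns the new logical end, and
// shrink() drops everything after it.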
1224
1225 void Heap::deleteSourceProviderCaches()
1226 {
1227 GCPHASE(DeleteSourceProviderCaches);
1228 m_vm->clearSourceProviderCaches();
1229 }
1230
1231 void Heap::notifyIncrementalSweeper()
1232 {
1233 GCPHASE(NotifyIncrementalSweeper);
1234
1235 if (m_operationInProgress == FullCollection) {
1236 if (!m_logicallyEmptyWeakBlocks.isEmpty())
1237 m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1238 }
1239
1240 m_sweeper->startSweeping();
1241 }
1242
1243 void Heap::rememberCurrentlyExecutingCodeBlocks()
1244 {
1245 GCPHASE(RememberCurrentlyExecutingCodeBlocks);
1246 m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
1247 }
1248
1249 void Heap::resetAllocators()
1250 {
1251 GCPHASE(ResetAllocators);
1252 m_objectSpace.resetAllocators();
1253 }
1254
1255 void Heap::updateAllocationLimits()
1256 {
1257 GCPHASE(UpdateAllocationLimits);
1258 size_t currentHeapSize = sizeAfterCollect();
1259 if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
1260 HeapStatistics::exitWithFailure();
1261
1262 if (m_operationInProgress == FullCollection) {
1263 // To avoid pathological GC churn in very small and very large heaps, we set
1264 // the new allocation limit based on the current size of the heap, with a
1265 // fixed minimum.
1266 m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
1267 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1268 m_sizeAfterLastFullCollect = currentHeapSize;
1269 m_bytesAbandonedSinceLastFullCollect = 0;
1270 } else {
1271 ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
1272 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1273 m_sizeAfterLastEdenCollect = currentHeapSize;
1274 double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
1275 double minEdenToOldGenerationRatio = 1.0 / 3.0;
1276 if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
1277 m_shouldDoFullCollection = true;
1278 m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
1279 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1280 if (m_fullActivityCallback) {
1281 ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
1282 m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
1283 }
1284 }
1285
1286 m_sizeAfterLastCollect = currentHeapSize;
1287 m_bytesAllocatedThisCycle = 0;
1288
1289 if (Options::logGC())
1290 dataLog(currentHeapSize / 1024, " kb, ");
1291 }
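// Worked example with illustrative numbers (LargeHeap, 1GB of RAM): a full collection that
// leaves 100MB live sets m_maxHeapSize = max(32MB, 2 * 100MB) = 200MB and m_maxEdenSize =
// 100MB. If the following eden collection leaves 150MB live, the eden-to-old ratio is
// (200MB - 150MB) / 200MB = 0.25 < 1/3, so the next collection is upgraded to a full one;
// the limits are then rebased to m_maxHeapSize = 250MB and m_maxEdenSize = 100MB.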
1292
1293 void Heap::didFinishCollection(double gcStartTime)
1294 {
1295 GCPHASE(FinishingCollection);
1296 double gcEndTime = WTF::monotonicallyIncreasingTime();
1297 if (m_operationInProgress == FullCollection)
1298 m_lastFullGCLength = gcEndTime - gcStartTime;
1299 else
1300 m_lastEdenGCLength = gcEndTime - gcStartTime;
1301
1302 if (Options::recordGCPauseTimes())
1303 HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
1304
1305 if (Options::useZombieMode())
1306 zombifyDeadObjects();
1307
1308 if (Options::objectsAreImmortal())
1309 markDeadObjects();
1310
1311 if (Options::showObjectStatistics())
1312 HeapStatistics::showObjectStatistics(this);
1313
1314 if (Options::logGC() == GCLogging::Verbose)
1315 GCLogging::dumpObjectGraph(this);
1316
1317 RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
1318 m_operationInProgress = NoOperation;
1319 JAVASCRIPTCORE_GC_END();
1320 }
1321
1322 void Heap::resumeCompilerThreads()
1323 {
1324 #if ENABLE(DFG_JIT)
1325 GCPHASE(ResumeCompilerThreads);
1326 for (auto worklist : m_suspendedCompilerWorklists)
1327 worklist->resumeAllThreads();
1328 m_suspendedCompilerWorklists.clear();
1329 #endif
1330 }
1331
1332 void Heap::markDeadObjects()
1333 {
1334 HeapIterationScope iterationScope(*this);
1335 m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
1336 }
1337
1338 void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
1339 {
1340 m_fullActivityCallback = activityCallback;
1341 }
1342
1343 void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
1344 {
1345 m_edenActivityCallback = activityCallback;
1346 }
1347
1348 GCActivityCallback* Heap::fullActivityCallback()
1349 {
1350 return m_fullActivityCallback.get();
1351 }
1352
1353 GCActivityCallback* Heap::edenActivityCallback()
1354 {
1355 return m_edenActivityCallback.get();
1356 }
1357
1358 void Heap::setIncrementalSweeper(std::unique_ptr<IncrementalSweeper> sweeper)
1359 {
1360 m_sweeper = WTF::move(sweeper);
1361 }
1362
1363 IncrementalSweeper* Heap::sweeper()
1364 {
1365 return m_sweeper.get();
1366 }
1367
1368 void Heap::setGarbageCollectionTimerEnabled(bool enable)
1369 {
1370 if (m_fullActivityCallback)
1371 m_fullActivityCallback->setEnabled(enable);
1372 if (m_edenActivityCallback)
1373 m_edenActivityCallback->setEnabled(enable);
1374 }
1375
1376 void Heap::didAllocate(size_t bytes)
1377 {
1378 if (m_edenActivityCallback)
1379 m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
1380 m_bytesAllocatedThisCycle += bytes;
1381 }
1382
1383 bool Heap::isValidAllocation(size_t)
1384 {
1385 if (!isValidThreadState(m_vm))
1386 return false;
1387
1388 if (m_operationInProgress != NoOperation)
1389 return false;
1390
1391 return true;
1392 }
1393
1394 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
1395 {
1396 WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
1397 }
1398
1399 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
1400 {
1401 HandleSlot slot = handle.slot();
1402 Finalizer finalizer = reinterpret_cast<Finalizer>(context);
1403 finalizer(slot->asCell());
1404 WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
1405 }
1406
1407 void Heap::addCompiledCode(ExecutableBase* executable)
1408 {
1409 m_compiledCode.append(executable);
1410 }
1411
1412 void Heap::collectAllGarbageIfNotDoneRecently()
1413 {
1414 if (!m_fullActivityCallback) {
1415 collectAllGarbage();
1416 return;
1417 }
1418
1419 if (m_fullActivityCallback->didSyncGCRecently()) {
1420         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
1421 reportAbandonedObjectGraph();
1422 return;
1423 }
1424
1425 m_fullActivityCallback->setDidSyncGCRecently();
1426 collectAllGarbage();
1427 }
1428
1429 class Zombify : public MarkedBlock::VoidFunctor {
1430 public:
1431 inline void visit(JSCell* cell)
1432 {
1433 void** current = reinterpret_cast<void**>(cell);
1434
1435 // We want to maintain zapped-ness because that's how we know if we've called
1436 // the destructor.
1437 if (cell->isZapped())
1438 current++;
1439
1440 void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
1441 for (; current < limit; current++)
1442 *current = zombifiedBits;
1443 }
1444 IterationStatus operator()(JSCell* cell)
1445 {
1446 visit(cell);
1447 return IterationStatus::Continue;
1448 }
1449 };
1450
1451 void Heap::zombifyDeadObjects()
1452 {
1453 // Sweep now because destructors will crash once we're zombified.
1454 {
1455 SamplingRegion samplingRegion("Garbage Collection: Sweeping");
1456 m_objectSpace.zombifySweep();
1457 }
1458 HeapIterationScope iterationScope(*this);
1459 m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
1460 }
1461
1462 void Heap::flushWriteBarrierBuffer(JSCell* cell)
1463 {
1464 #if ENABLE(GGC)
1465 m_writeBarrierBuffer.flush(*this);
1466 m_writeBarrierBuffer.add(cell);
1467 #else
1468 UNUSED_PARAM(cell);
1469 #endif
1470 }
1471
1472 bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
1473 {
1474 #if ENABLE(GGC)
1475 if (Options::alwaysDoFullCollection())
1476 return true;
1477
1478 switch (requestedCollectionType) {
1479 case EdenCollection:
1480 return false;
1481 case FullCollection:
1482 return true;
1483 case AnyCollection:
1484 return m_shouldDoFullCollection;
1485 default:
1486 RELEASE_ASSERT_NOT_REACHED();
1487 return false;
1488 }
1489 RELEASE_ASSERT_NOT_REACHED();
1490 return false;
1491 #else
1492 UNUSED_PARAM(requestedCollectionType);
1493 return true;
1494 #endif
1495 }
1496
1497 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
1498 {
1499 m_logicallyEmptyWeakBlocks.append(block);
1500 }
1501
1502 void Heap::sweepAllLogicallyEmptyWeakBlocks()
1503 {
1504 if (m_logicallyEmptyWeakBlocks.isEmpty())
1505 return;
1506
1507 m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1508 while (sweepNextLogicallyEmptyWeakBlock()) { }
1509 }
1510
1511 bool Heap::sweepNextLogicallyEmptyWeakBlock()
1512 {
1513 if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
1514 return false;
1515
1516 WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
1517
1518 block->sweep();
1519 if (block->isEmpty()) {
1520 std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
1521 m_logicallyEmptyWeakBlocks.removeLast();
1522 WeakBlock::destroy(block);
1523 } else
1524 m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
1525
1526 if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
1527 m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
1528 return false;
1529 }
1530
1531 return true;
1532 }
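// Note that when a block is destroyed we do not advance the index: the element swapped in
// from the back still needs to be swept on the next call. The index is parked at WTF::notFound
// once the vector has been exhausted.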
1533
1534 } // namespace JSC