// apple/javascriptcore.git: heap/Heap.cpp (commit 447383b667882fc29e7e805d039dd3d53ee2d6ec)
1 /*
2 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "ConservativeRoots.h"
26 #include "CopiedSpace.h"
27 #include "CopiedSpaceInlines.h"
28 #include "CopyVisitorInlines.h"
29 #include "DFGWorklist.h"
30 #include "DelayedReleaseScope.h"
31 #include "EdenGCActivityCallback.h"
32 #include "FullGCActivityCallback.h"
33 #include "GCActivityCallback.h"
34 #include "GCIncomingRefCountedSetInlines.h"
35 #include "HeapIterationScope.h"
36 #include "HeapRootVisitor.h"
37 #include "HeapStatistics.h"
38 #include "IncrementalSweeper.h"
39 #include "Interpreter.h"
40 #include "JSGlobalObject.h"
41 #include "JSLock.h"
42 #include "JSONObject.h"
43 #include "JSCInlines.h"
44 #include "JSVirtualMachineInternal.h"
45 #include "RecursiveAllocationScope.h"
46 #include "Tracing.h"
47 #include "UnlinkedCodeBlock.h"
48 #include "VM.h"
49 #include "WeakSetInlines.h"
50 #include <algorithm>
51 #include <wtf/RAMSize.h>
52 #include <wtf/CurrentTime.h>
53 #include <wtf/ProcessID.h>
54
55 using namespace std;
56 using namespace JSC;
57
58 namespace JSC {
59
60 namespace {
61
62 static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
63 static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
64
65 #define ENABLE_GC_LOGGING 0
66
67 #if ENABLE(GC_LOGGING)
68 #if COMPILER(CLANG)
69 #define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
70 _Pragma("clang diagnostic push") \
71 _Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
72 _Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
73 static type name arguments; \
74 _Pragma("clang diagnostic pop")
75 #else
76 #define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
77 static type name arguments;
78 #endif // COMPILER(CLANG)
79
80 struct GCTimer {
81 GCTimer(const char* name)
82 : m_name(name)
83 {
84 }
85 ~GCTimer()
86 {
87 logData(m_allCollectionData, "(All)");
88 logData(m_edenCollectionData, "(Eden)");
89 logData(m_fullCollectionData, "(Full)");
90 }
91
92 struct TimeRecord {
93 TimeRecord()
94 : m_time(0)
95 , m_min(std::numeric_limits<double>::infinity())
96 , m_max(0)
97 , m_count(0)
98 {
99 }
100
101 double m_time;
102 double m_min;
103 double m_max;
104 size_t m_count;
105 };
106
107 void logData(const TimeRecord& data, const char* extra)
108 {
109 dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
110 getCurrentProcessID(),
111 m_name, extra,
112 data.m_time * 1000,
113 data.m_time * 1000 / data.m_count,
114 data.m_min * 1000,
115 data.m_max * 1000,
116 data.m_count);
117 }
118
119 void updateData(TimeRecord& data, double duration)
120 {
121 if (duration < data.m_min)
122 data.m_min = duration;
123 if (duration > data.m_max)
124 data.m_max = duration;
125 data.m_count++;
126 data.m_time += duration;
127 }
128
129 void didFinishPhase(HeapOperation collectionType, double duration)
130 {
131 TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
132 updateData(data, duration);
133 updateData(m_allCollectionData, duration);
134 }
135
136 TimeRecord m_allCollectionData;
137 TimeRecord m_fullCollectionData;
138 TimeRecord m_edenCollectionData;
139 const char* m_name;
140 };
141
142 struct GCTimerScope {
143 GCTimerScope(GCTimer* timer, HeapOperation collectionType)
144 : m_timer(timer)
145 , m_start(WTF::monotonicallyIncreasingTime())
146 , m_collectionType(collectionType)
147 {
148 }
149 ~GCTimerScope()
150 {
151 double delta = WTF::monotonicallyIncreasingTime() - m_start;
152 m_timer->didFinishPhase(m_collectionType, delta);
153 }
154 GCTimer* m_timer;
155 double m_start;
156 HeapOperation m_collectionType;
157 };
158
159 struct GCCounter {
160 GCCounter(const char* name)
161 : m_name(name)
162 , m_count(0)
163 , m_total(0)
164 , m_min(10000000)
165 , m_max(0)
166 {
167 }
168
169 void count(size_t amount)
170 {
171 m_count++;
172 m_total += amount;
173 if (amount < m_min)
174 m_min = amount;
175 if (amount > m_max)
176 m_max = amount;
177 }
178 ~GCCounter()
179 {
180 dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
181 }
182 const char* m_name;
183 size_t m_count;
184 size_t m_total;
185 size_t m_min;
186 size_t m_max;
187 };
188
189 #define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
190 #define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
191
192 #else
193
194 #define GCPHASE(name) do { } while (false)
195 #define GCCOUNTER(name, value) do { } while (false)
196 #endif
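// GCPHASE(Name) declares a function-local static GCTimer plus a GCTimerScope
// whose destructor charges the elapsed time of the enclosing scope to the
// current collection type (Eden vs. Full); GCCOUNTER(Name, value) accumulates
// a named counter. Both expand to no-ops unless GC_LOGGING is enabled, in
// which case the totals are printed from the GCTimer/GCCounter destructors at
// exit.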
197
198 static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
199 {
200 if (heapType == LargeHeap)
201 return min(largeHeapSize, ramSize / 4);
202 return smallHeapSize;
203 }
204
205 static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
206 {
207 // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
208 if (heapSize < ramSize / 4)
209 return 2 * heapSize;
210 if (heapSize < ramSize / 2)
211 return 1.5 * heapSize;
212 return 1.25 * heapSize;
213 }
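// For example, with 8 GB of RAM: a 1 GB heap is under ramSize / 4, so the next
// collection is scheduled at 2 GB; a 3 GB heap grows by 1.5x, and anything
// past 4 GB grows by only 1.25x.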
214
215 static inline bool isValidSharedInstanceThreadState(VM* vm)
216 {
217 return vm->currentThreadIsHoldingAPILock();
218 }
219
220 static inline bool isValidThreadState(VM* vm)
221 {
222 if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
223 return false;
224
225 if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
226 return false;
227
228 return true;
229 }
230
231 struct MarkObject : public MarkedBlock::VoidFunctor {
232 void operator()(JSCell* cell)
233 {
234 if (cell->isZapped())
235 return;
236 Heap::heap(cell)->setMarked(cell);
237 }
238 };
239
240 struct Count : public MarkedBlock::CountFunctor {
241 void operator()(JSCell*) { count(1); }
242 };
243
244 struct CountIfGlobalObject : MarkedBlock::CountFunctor {
245 void operator()(JSCell* cell) {
246 if (!cell->isObject())
247 return;
248 if (!asObject(cell)->isGlobalObject())
249 return;
250 count(1);
251 }
252 };
253
254 class RecordType {
255 public:
256 typedef PassOwnPtr<TypeCountSet> ReturnType;
257
258 RecordType();
259 void operator()(JSCell*);
260 ReturnType returnValue();
261
262 private:
263 const char* typeName(JSCell*);
264 OwnPtr<TypeCountSet> m_typeCountSet;
265 };
266
267 inline RecordType::RecordType()
268 : m_typeCountSet(adoptPtr(new TypeCountSet))
269 {
270 }
271
272 inline const char* RecordType::typeName(JSCell* cell)
273 {
274 const ClassInfo* info = cell->classInfo();
275 if (!info || !info->className)
276 return "[unknown]";
277 return info->className;
278 }
279
280 inline void RecordType::operator()(JSCell* cell)
281 {
282 m_typeCountSet->add(typeName(cell));
283 }
284
285 inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
286 {
287 return m_typeCountSet.release();
288 }
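// RecordType is used with forEachProtectedCell() and forEachLiveCell() below
// (see protectedObjectTypeCounts() and objectTypeCounts()) to build a
// histogram of cell class names.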
289
290 } // anonymous namespace
291
292 Heap::Heap(VM* vm, HeapType heapType)
293 : m_heapType(heapType)
294 , m_ramSize(ramSize())
295 , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
296 , m_sizeAfterLastCollect(0)
297 , m_sizeAfterLastFullCollect(0)
298 , m_sizeBeforeLastFullCollect(0)
299 , m_sizeAfterLastEdenCollect(0)
300 , m_sizeBeforeLastEdenCollect(0)
301 , m_bytesAllocatedThisCycle(0)
302 , m_bytesAbandonedSinceLastFullCollect(0)
303 , m_maxEdenSize(m_minBytesPerCycle)
304 , m_maxHeapSize(m_minBytesPerCycle)
305 , m_shouldDoFullCollection(false)
306 , m_totalBytesVisited(0)
307 , m_totalBytesCopied(0)
308 , m_operationInProgress(NoOperation)
309 , m_blockAllocator()
310 , m_objectSpace(this)
311 , m_storageSpace(this)
312 , m_extraMemoryUsage(0)
313 , m_machineThreads(this)
314 , m_sharedData(vm)
315 , m_slotVisitor(m_sharedData)
316 , m_copyVisitor(m_sharedData)
317 , m_handleSet(vm)
318 , m_codeBlocks(m_blockAllocator)
319 , m_isSafeToCollect(false)
320 , m_writeBarrierBuffer(256)
321 , m_vm(vm)
322 // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
323 // schedule the timer if we've never done a collection.
324 , m_lastFullGCLength(0.01)
325 , m_lastEdenGCLength(0.01)
326 , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
327 , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
328 #if ENABLE(GGC)
329 , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
330 #else
331 , m_edenActivityCallback(m_fullActivityCallback)
332 #endif
333 , m_sweeper(IncrementalSweeper::create(this))
334 , m_deferralDepth(0)
335 {
336 m_storageSpace.init();
337 }
338
339 Heap::~Heap()
340 {
341 }
342
343 bool Heap::isPagedOut(double deadline)
344 {
345 return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
346 }
347
348 // The VM is being destroyed and the collector will never run again.
349 // Run all pending finalizers now because we won't get another chance.
350 void Heap::lastChanceToFinalize()
351 {
352 RELEASE_ASSERT(!m_vm->entryScope);
353 RELEASE_ASSERT(m_operationInProgress == NoOperation);
354
355 m_objectSpace.lastChanceToFinalize();
356 }
357
358 void Heap::reportExtraMemoryCostSlowCase(size_t cost)
359 {
360 // Our frequency of garbage collection tries to balance memory use against speed
361 // by collecting based on the number of newly created values. However, for values
362 // that hold on to a great deal of memory that's not in the form of other JS values,
363 // that is not good enough - in some cases a lot of those objects can pile up and
364 // use crazy amounts of memory without a GC happening. So we track these extra
365 // memory costs. Only unusually large objects are noted, and we only keep track
366 // of this extra cost until the next GC. In garbage collected languages, most values
367 // are either very short lived temporaries, or have extremely long lifetimes. So
368 // if a large value survives one garbage collection, there is not much point to
369 // collecting more frequently as long as it stays alive.
370
371 didAllocate(cost);
372 collectIfNecessaryOrDefer();
373 }
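// Illustrative sketch (hypothetical caller, not part of this file): a cell
// that owns a large out-of-line buffer would typically report it through the
// inline fast path in Heap.h, which forwards unusually large costs here, e.g.:
//
//     // in SomeCell::finishCreation(VM& vm):
//     vm.heap.reportExtraMemoryCost(m_bufferSizeInBytes);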
374
375 void Heap::reportAbandonedObjectGraph()
376 {
377 // Our clients don't know exactly how much memory they
378 // are abandoning so we just guess for them.
379 double abandonedBytes = 0.1 * m_sizeAfterLastCollect;
380
381 // We want to accelerate the next collection. Because memory has just
382 // been abandoned, the next collection has the potential to
383 // be more profitable. Since allocation is the trigger for collection,
384 // we hasten the next collection by pretending that we've allocated more memory.
385 didAbandon(abandonedBytes);
386 }
387
388 void Heap::didAbandon(size_t bytes)
389 {
390 if (m_fullActivityCallback) {
391 m_fullActivityCallback->didAllocate(
392 m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
393 }
394 m_bytesAbandonedSinceLastFullCollect += bytes;
395 }
396
397 void Heap::protect(JSValue k)
398 {
399 ASSERT(k);
400 ASSERT(m_vm->currentThreadIsHoldingAPILock());
401
402 if (!k.isCell())
403 return;
404
405 m_protectedValues.add(k.asCell());
406 }
407
408 bool Heap::unprotect(JSValue k)
409 {
410 ASSERT(k);
411 ASSERT(m_vm->currentThreadIsHoldingAPILock());
412
413 if (!k.isCell())
414 return false;
415
416 return m_protectedValues.remove(k.asCell());
417 }
418
419 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
420 {
421 if (m_arrayBuffers.addReference(cell, buffer)) {
422 collectIfNecessaryOrDefer();
423 didAllocate(buffer->gcSizeEstimateInBytes());
424 }
425 }
426
427 void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
428 {
429 m_tempSortingVectors.append(tempVector);
430 }
431
432 void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
433 {
434 ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
435 m_tempSortingVectors.removeLast();
436 }
437
438 void Heap::harvestWeakReferences()
439 {
440 m_slotVisitor.harvestWeakReferences();
441 }
442
443 void Heap::finalizeUnconditionalFinalizers()
444 {
445 GCPHASE(FinalizeUnconditionalFinalizers);
446 m_slotVisitor.finalizeUnconditionalFinalizers();
447 }
448
449 inline JSStack& Heap::stack()
450 {
451 return m_vm->interpreter->stack();
452 }
453
454 void Heap::willStartIterating()
455 {
456 m_objectSpace.willStartIterating();
457 }
458
459 void Heap::didFinishIterating()
460 {
461 m_objectSpace.didFinishIterating();
462 }
463
464 void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
465 {
466 ASSERT(isValidThreadState(m_vm));
467 ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
468 stack().gatherConservativeRoots(stackRoots);
469 size_t stackRootCount = stackRoots.size();
470 JSCell** registerRoots = stackRoots.roots();
471 for (size_t i = 0; i < stackRootCount; i++) {
472 setMarked(registerRoots[i]);
473 registerRoots[i]->setMarked();
474 roots.add(registerRoots[i]);
475 }
476 }
477
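// Marking happens in three broad steps: conservative roots are gathered from
// the machine stack and scratch buffers while the old mark bits are still
// valid, liveness data is then cleared, and finally we trace from the strong
// roots (in parallel where enabled) before resolving weak handles, whose
// liveness depends on the rest of the graph.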
478 void Heap::markRoots(double gcStartTime)
479 {
480 SamplingRegion samplingRegion("Garbage Collection: Marking");
481
482 GCPHASE(MarkRoots);
483 ASSERT(isValidThreadState(m_vm));
484
485 #if ENABLE(GGC)
486 Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
487 m_slotVisitor.markStack().fillVector(rememberedSet);
488 #else
489 Vector<const JSCell*> rememberedSet;
490 #endif
491
492 if (m_operationInProgress == EdenCollection)
493 m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
494 else
495 m_codeBlocks.clearMarksForFullCollection();
496
497 // We gather conservative roots before clearing mark bits because conservative
498 // gathering uses the mark bits to determine whether a reference is valid.
499 void* dummy;
500 ALLOCATE_AND_GET_REGISTER_STATE(registers);
501 ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
502 gatherStackRoots(conservativeRoots, &dummy, registers);
503 gatherJSStackRoots(conservativeRoots);
504 gatherScratchBufferRoots(conservativeRoots);
505
506 sanitizeStackForVM(m_vm);
507
508 clearLivenessData();
509
510 m_sharedData.didStartMarking();
511 m_slotVisitor.didStartMarking();
512 HeapRootVisitor heapRootVisitor(m_slotVisitor);
513
514 {
515 ParallelModeEnabler enabler(m_slotVisitor);
516
517 visitExternalRememberedSet();
518 visitSmallStrings();
519 visitConservativeRoots(conservativeRoots);
520 visitProtectedObjects(heapRootVisitor);
521 visitTempSortVectors(heapRootVisitor);
522 visitArgumentBuffers(heapRootVisitor);
523 visitException(heapRootVisitor);
524 visitStrongHandles(heapRootVisitor);
525 visitHandleStack(heapRootVisitor);
526 traceCodeBlocksAndJITStubRoutines();
527 converge();
528 }
529
530 // Weak references must be marked last because their liveness depends on
531 // the liveness of the rest of the object graph.
532 visitWeakHandles(heapRootVisitor);
533
534 clearRememberedSet(rememberedSet);
535 m_sharedData.didFinishMarking();
536 updateObjectCounts(gcStartTime);
537 resetVisitors();
538 }
539
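// After marking, live backing stores are evacuated out of CopiedSpace.
// startedCopying() is specialized on the collection type so an Eden collection
// only has to consider newly allocated copied blocks, while a full collection
// considers the whole copied space. If shouldDoCopyPhase() says there is
// nothing worth copying, we still call doneCopying() so the space ends up in a
// consistent state.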
540 void Heap::copyBackingStores()
541 {
542 if (m_operationInProgress == EdenCollection)
543 m_storageSpace.startedCopying<EdenCollection>();
544 else {
545 ASSERT(m_operationInProgress == FullCollection);
546 m_storageSpace.startedCopying<FullCollection>();
547 }
548
549 if (m_storageSpace.shouldDoCopyPhase()) {
550 m_sharedData.didStartCopying();
551 m_copyVisitor.startCopying();
552 m_copyVisitor.copyFromShared();
553 m_copyVisitor.doneCopying();
554 // We need to wait for everybody to finish and return their CopiedBlocks
555 // before signaling that the phase is complete.
556 m_storageSpace.doneCopying();
557 m_sharedData.didFinishCopying();
558 } else
559 m_storageSpace.doneCopying();
560 }
561
562 void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy, MachineThreads::RegisterState& registers)
563 {
564 GCPHASE(GatherStackRoots);
565 m_jitStubRoutines.clearMarks();
566 m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy, registers);
567 }
568
569 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
570 {
571 #if !ENABLE(JIT)
572 GCPHASE(GatherJSStackRoots);
573 stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
574 #else
575 UNUSED_PARAM(roots);
576 #endif
577 }
578
579 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
580 {
581 #if ENABLE(DFG_JIT)
582 GCPHASE(GatherScratchBufferRoots);
583 m_vm->gatherConservativeRoots(roots);
584 #else
585 UNUSED_PARAM(roots);
586 #endif
587 }
588
589 void Heap::clearLivenessData()
590 {
591 GCPHASE(ClearLivenessData);
592 m_objectSpace.clearNewlyAllocated();
593 m_objectSpace.clearMarks();
594 }
595
596 void Heap::visitExternalRememberedSet()
597 {
598 #if JSC_OBJC_API_ENABLED
599 scanExternalRememberedSet(*m_vm, m_slotVisitor);
600 #endif
601 }
602
603 void Heap::visitSmallStrings()
604 {
605 GCPHASE(VisitSmallStrings);
606 m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
607
608 if (Options::logGC() == GCLogging::Verbose)
609 dataLog("Small strings:\n", m_slotVisitor);
610
611 m_slotVisitor.donateAndDrain();
612 }
613
614 void Heap::visitConservativeRoots(ConservativeRoots& roots)
615 {
616 GCPHASE(VisitConservativeRoots);
617 m_slotVisitor.append(roots);
618
619 if (Options::logGC() == GCLogging::Verbose)
620 dataLog("Conservative Roots:\n", m_slotVisitor);
621
622 m_slotVisitor.donateAndDrain();
623 }
624
625 void Heap::visitCompilerWorklistWeakReferences()
626 {
627 #if ENABLE(DFG_JIT)
628 for (auto worklist : m_suspendedCompilerWorklists)
629 worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);
630
631 if (Options::logGC() == GCLogging::Verbose)
632 dataLog("DFG Worklists:\n", m_slotVisitor);
633 #endif
634 }
635
636 void Heap::removeDeadCompilerWorklistEntries()
637 {
638 #if ENABLE(DFG_JIT)
639 GCPHASE(FinalizeDFGWorklists);
640 for (auto worklist : m_suspendedCompilerWorklists)
641 worklist->removeDeadPlans(*m_vm);
642 #endif
643 }
644
645 void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
646 {
647 GCPHASE(VisitProtectedObjects);
648
649 for (auto& pair : m_protectedValues)
650 heapRootVisitor.visit(&pair.key);
651
652 if (Options::logGC() == GCLogging::Verbose)
653 dataLog("Protected Objects:\n", m_slotVisitor);
654
655 m_slotVisitor.donateAndDrain();
656 }
657
658 void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
659 {
660 GCPHASE(VisitTempSortVectors);
661 typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>*> VectorOfValueStringVectors;
662
663 for (auto* vector : m_tempSortingVectors) {
664 for (auto& valueStringPair : *vector) {
665 if (valueStringPair.first)
666 heapRootVisitor.visit(&valueStringPair.first);
667 }
668 }
669
670 if (Options::logGC() == GCLogging::Verbose)
671 dataLog("Temp Sort Vectors:\n", m_slotVisitor);
672
673 m_slotVisitor.donateAndDrain();
674 }
675
676 void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
677 {
678 GCPHASE(MarkingArgumentBuffers);
679 if (!m_markListSet || !m_markListSet->size())
680 return;
681
682 MarkedArgumentBuffer::markLists(visitor, *m_markListSet);
683
684 if (Options::logGC() == GCLogging::Verbose)
685 dataLog("Argument Buffers:\n", m_slotVisitor);
686
687 m_slotVisitor.donateAndDrain();
688 }
689
690 void Heap::visitException(HeapRootVisitor& visitor)
691 {
692 GCPHASE(MarkingException);
693 if (!m_vm->exception())
694 return;
695
696 visitor.visit(m_vm->addressOfException());
697
698 if (Options::logGC() == GCLogging::Verbose)
699 dataLog("Exceptions:\n", m_slotVisitor);
700
701 m_slotVisitor.donateAndDrain();
702 }
703
704 void Heap::visitStrongHandles(HeapRootVisitor& visitor)
705 {
706 GCPHASE(VisitStrongHandles);
707 m_handleSet.visitStrongHandles(visitor);
708
709 if (Options::logGC() == GCLogging::Verbose)
710 dataLog("Strong Handles:\n", m_slotVisitor);
711
712 m_slotVisitor.donateAndDrain();
713 }
714
715 void Heap::visitHandleStack(HeapRootVisitor& visitor)
716 {
717 GCPHASE(VisitHandleStack);
718 m_handleStack.visit(visitor);
719
720 if (Options::logGC() == GCLogging::Verbose)
721 dataLog("Handle Stack:\n", m_slotVisitor);
722
723 m_slotVisitor.donateAndDrain();
724 }
725
726 void Heap::traceCodeBlocksAndJITStubRoutines()
727 {
728 GCPHASE(TraceCodeBlocksAndJITStubRoutines);
729 m_codeBlocks.traceMarked(m_slotVisitor);
730 m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);
731
732 if (Options::logGC() == GCLogging::Verbose)
733 dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);
734
735 m_slotVisitor.donateAndDrain();
736 }
737
738 void Heap::converge()
739 {
740 #if ENABLE(PARALLEL_GC)
741 GCPHASE(Convergence);
742 m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
743 #endif
744 }
745
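// Weak handle visiting is a fixed point: visiting weak sets and harvesting
// weak references can mark new objects, which in turn can make more weak
// handles (and newly "executing" code blocks) live, so we keep draining until
// the visitor's mark stack is empty.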
746 void Heap::visitWeakHandles(HeapRootVisitor& visitor)
747 {
748 GCPHASE(VisitingLiveWeakHandles);
749 while (true) {
750 m_objectSpace.visitWeakSets(visitor);
751 harvestWeakReferences();
752 visitCompilerWorklistWeakReferences();
753 m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
754 if (m_slotVisitor.isEmpty())
755 break;
756
757 if (Options::logGC() == GCLogging::Verbose)
758 dataLog("Live Weak Handles:\n", m_slotVisitor);
759
760 {
761 ParallelModeEnabler enabler(m_slotVisitor);
762 m_slotVisitor.donateAndDrain();
763 #if ENABLE(PARALLEL_GC)
764 m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
765 #endif
766 }
767 }
768 }
769
770 void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
771 {
772 #if ENABLE(GGC)
773 GCPHASE(ClearRememberedSet);
774 for (auto* cell : rememberedSet) {
775 MarkedBlock::blockFor(cell)->clearRemembered(cell);
776 const_cast<JSCell*>(cell)->setRemembered(false);
777 }
778 #else
779 UNUSED_PARAM(rememberedSet);
780 #endif
781 }
782
783 void Heap::updateObjectCounts(double gcStartTime)
784 {
785 GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());
786
787 if (Options::logGC() == GCLogging::Verbose) {
788 size_t visitCount = m_slotVisitor.visitCount();
789 #if ENABLE(PARALLEL_GC)
790 visitCount += m_sharedData.childVisitCount();
791 #endif
792 dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
793 }
794
795 if (m_operationInProgress == EdenCollection) {
796 m_totalBytesVisited += m_slotVisitor.bytesVisited();
797 m_totalBytesCopied += m_slotVisitor.bytesCopied();
798 } else {
799 ASSERT(m_operationInProgress == FullCollection);
800 m_totalBytesVisited = m_slotVisitor.bytesVisited();
801 m_totalBytesCopied = m_slotVisitor.bytesCopied();
802 }
803 #if ENABLE(PARALLEL_GC)
804 m_totalBytesVisited += m_sharedData.childBytesVisited();
805 m_totalBytesCopied += m_sharedData.childBytesCopied();
806 #endif
807 }
808
809 void Heap::resetVisitors()
810 {
811 m_slotVisitor.reset();
812 #if ENABLE(PARALLEL_GC)
813 m_sharedData.resetChildren();
814 #endif
815 m_sharedData.reset();
816 }
817
818 size_t Heap::objectCount()
819 {
820 return m_objectSpace.objectCount();
821 }
822
823 size_t Heap::extraSize()
824 {
825 return m_extraMemoryUsage + m_arrayBuffers.size();
826 }
827
828 size_t Heap::size()
829 {
830 return m_objectSpace.size() + m_storageSpace.size() + extraSize();
831 }
832
833 size_t Heap::capacity()
834 {
835 return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
836 }
837
838 size_t Heap::sizeAfterCollect()
839 {
840 // The result here may not agree with the normal Heap::size().
841 // This is due to the fact that we only count live copied bytes
842 // rather than all used (including dead) copied bytes, thus it's
843 // always the case that m_totalBytesCopied <= m_storageSpace.size().
844 ASSERT(m_totalBytesCopied <= m_storageSpace.size());
845 return m_totalBytesVisited + m_totalBytesCopied + extraSize();
846 }
847
848 size_t Heap::protectedGlobalObjectCount()
849 {
850 return forEachProtectedCell<CountIfGlobalObject>();
851 }
852
853 size_t Heap::globalObjectCount()
854 {
855 HeapIterationScope iterationScope(*this);
856 return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
857 }
858
859 size_t Heap::protectedObjectCount()
860 {
861 return forEachProtectedCell<Count>();
862 }
863
864 PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
865 {
866 return forEachProtectedCell<RecordType>();
867 }
868
869 PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
870 {
871 HeapIterationScope iterationScope(*this);
872 return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
873 }
874
875 void Heap::deleteAllCompiledCode()
876 {
877 // If JavaScript is running, it's not safe to delete code, since we'll end
878 // up deleting code that is live on the stack.
879 if (m_vm->entryScope)
880 return;
881
882 // If we have things on any worklist, then don't delete code. This is kind of
883 // a weird heuristic. It's definitely not safe to throw away code that is on
884 // the worklist. But this change was made in a hurry so we just avoid throwing
885 // away any code if there is any code on any worklist. I suspect that this
886 // might not actually be too dumb: if there is code on worklists then that
887 // means that we are running some hot JS code right now. Maybe causing
888 // recompilations isn't a good idea.
889 #if ENABLE(DFG_JIT)
890 for (unsigned i = DFG::numberOfWorklists(); i--;) {
891 if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
892 if (worklist->isActiveForVM(*vm()))
893 return;
894 }
895 }
896 #endif // ENABLE(DFG_JIT)
897
898 for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
899 if (!current->isFunctionExecutable())
900 continue;
901 static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
902 }
903
904 ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
905 m_codeBlocks.clearMarksForFullCollection();
906 m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
907 }
908
909 void Heap::deleteAllUnlinkedFunctionCode()
910 {
911 for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
912 if (!current->isFunctionExecutable())
913 continue;
914 static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
915 }
916 }
917
918 void Heap::clearUnmarkedExecutables()
919 {
920 GCPHASE(ClearUnmarkedExecutables);
921 ExecutableBase* next;
922 for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
923 next = current->next();
924 if (isMarked(current))
925 continue;
926
927 // We do this because executable memory is limited on some platforms and because
928 // CodeBlock requires eager finalization.
929 ExecutableBase::clearCodeVirtual(current);
930 m_compiledCode.remove(current);
931 }
932 }
933
934 void Heap::deleteUnmarkedCompiledCode()
935 {
936 GCPHASE(DeleteCodeBlocks);
937 clearUnmarkedExecutables();
938 m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
939 m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
940 }
941
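// Generational write-barrier support: when an already-marked (old) cell is
// mutated so that it may point at new objects, it is added to the remembered
// set and pushed onto the slot visitor's mark stack unconditionally, so the
// next Eden collection re-scans it as if it were a root.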
942 void Heap::addToRememberedSet(const JSCell* cell)
943 {
944 ASSERT(cell);
945 ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
946 if (isRemembered(cell))
947 return;
948 MarkedBlock::blockFor(cell)->setRemembered(cell);
949 const_cast<JSCell*>(cell)->setRemembered(true);
950 m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
951 }
952
953 void Heap::collectAllGarbage()
954 {
955 if (!m_isSafeToCollect)
956 return;
957
958 collect(FullCollection);
959
960 SamplingRegion samplingRegion("Garbage Collection: Sweeping");
961 DelayedReleaseScope delayedReleaseScope(m_objectSpace);
962 m_objectSpace.sweep();
963 m_objectSpace.shrink();
964 }
965
966 static double minute = 60.0;
967
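// A collection is a straight-line sequence of phases: suspend the DFG
// compiler threads, pick Eden vs. Full, throw away old code if it has not
// been discarded for over a minute, mark, copy backing stores, run
// finalizers, delete dead compiled code, notify the incremental sweeper,
// recompute the allocation limits that schedule the next collection, and
// resume the compiler threads.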
968 void Heap::collect(HeapOperation collectionType)
969 {
970 #if ENABLE(ALLOCATION_LOGGING)
971 dataLogF("JSC GC starting collection.\n");
972 #endif
973
974 double before = 0;
975 if (Options::logGC()) {
976 dataLog("[GC: ");
977 before = currentTimeMS();
978 }
979
980 SamplingRegion samplingRegion("Garbage Collection");
981
982 RELEASE_ASSERT(!m_deferralDepth);
983 ASSERT(vm()->currentThreadIsHoldingAPILock());
984 RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
985 ASSERT(m_isSafeToCollect);
986 JAVASCRIPTCORE_GC_BEGIN();
987 RELEASE_ASSERT(m_operationInProgress == NoOperation);
988
989 suspendCompilerThreads();
990 willStartCollection(collectionType);
991 GCPHASE(Collect);
992
993 double gcStartTime = WTF::monotonicallyIncreasingTime();
994
995 deleteOldCode(gcStartTime);
996 flushOldStructureIDTables();
997 stopAllocation();
998 flushWriteBarrierBuffer();
999
1000 markRoots(gcStartTime);
1001
1002 JAVASCRIPTCORE_GC_MARKED();
1003
1004 reapWeakHandles();
1005 sweepArrayBuffers();
1006 snapshotMarkedSpace();
1007
1008 copyBackingStores();
1009
1010 finalizeUnconditionalFinalizers();
1011 removeDeadCompilerWorklistEntries();
1012 deleteUnmarkedCompiledCode();
1013 deleteSourceProviderCaches();
1014 notifyIncrementalSweeper();
1015 rememberCurrentlyExecutingCodeBlocks();
1016
1017 resetAllocators();
1018 updateAllocationLimits();
1019 didFinishCollection(gcStartTime);
1020 resumeCompilerThreads();
1021
1022 if (Options::logGC()) {
1023 double after = currentTimeMS();
1024 dataLog(after - before, " ms]\n");
1025 }
1026 }
1027
1028 void Heap::suspendCompilerThreads()
1029 {
1030 #if ENABLE(DFG_JIT)
1031 GCPHASE(SuspendCompilerThreads);
1032 ASSERT(m_suspendedCompilerWorklists.isEmpty());
1033 for (unsigned i = DFG::numberOfWorklists(); i--;) {
1034 if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
1035 m_suspendedCompilerWorklists.append(worklist);
1036 worklist->suspendAllThreads();
1037 }
1038 }
1039 #endif
1040 }
1041
1042 void Heap::willStartCollection(HeapOperation collectionType)
1043 {
1044 GCPHASE(StartingCollection);
1045 if (shouldDoFullCollection(collectionType)) {
1046 m_operationInProgress = FullCollection;
1047 m_slotVisitor.clearMarkStack();
1048 m_shouldDoFullCollection = false;
1049 if (Options::logGC())
1050 dataLog("FullCollection, ");
1051 } else {
1052 m_operationInProgress = EdenCollection;
1053 if (Options::logGC())
1054 dataLog("EdenCollection, ");
1055 }
1056 if (m_operationInProgress == FullCollection) {
1057 m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1058 m_extraMemoryUsage = 0;
1059
1060 if (m_fullActivityCallback)
1061 m_fullActivityCallback->willCollect();
1062 } else {
1063 ASSERT(m_operationInProgress == EdenCollection);
1064 m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1065 }
1066
1067 if (m_edenActivityCallback)
1068 m_edenActivityCallback->willCollect();
1069 }
1070
1071 void Heap::deleteOldCode(double gcStartTime)
1072 {
1073 if (m_operationInProgress == EdenCollection)
1074 return;
1075
1076 GCPHASE(DeleteOldCode);
1077 if (gcStartTime - m_lastCodeDiscardTime > minute) {
1078 deleteAllCompiledCode();
1079 m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
1080 }
1081 }
1082
1083 void Heap::flushOldStructureIDTables()
1084 {
1085 GCPHASE(FlushOldStructureIDTables);
1086 m_structureIDTable.flushOldTables();
1087 }
1088
1089 void Heap::flushWriteBarrierBuffer()
1090 {
1091 GCPHASE(FlushWriteBarrierBuffer);
1092 if (m_operationInProgress == EdenCollection) {
1093 m_writeBarrierBuffer.flush(*this);
1094 return;
1095 }
1096 m_writeBarrierBuffer.reset();
1097 }
1098
1099 void Heap::stopAllocation()
1100 {
1101 GCPHASE(StopAllocation);
1102 m_objectSpace.stopAllocating();
1103 if (m_operationInProgress == FullCollection)
1104 m_storageSpace.didStartFullCollection();
1105 }
1106
1107 void Heap::reapWeakHandles()
1108 {
1109 GCPHASE(ReapingWeakHandles);
1110 m_objectSpace.reapWeakSets();
1111 }
1112
1113 void Heap::sweepArrayBuffers()
1114 {
1115 GCPHASE(SweepingArrayBuffers);
1116 m_arrayBuffers.sweep();
1117 }
1118
1119 struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
1120 MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
1121 : m_index(0)
1122 , m_blocks(blocks)
1123 {
1124 }
1125
1126 void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
1127
1128 size_t m_index;
1129 Vector<MarkedBlock*>& m_blocks;
1130 };
1131
1132 void Heap::snapshotMarkedSpace()
1133 {
1134 GCPHASE(SnapshotMarkedSpace);
1135 if (m_operationInProgress != FullCollection)
1136 return;
1137
1138 m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
1139 MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
1140 m_objectSpace.forEachBlock(functor);
1141 }
1142
1143 void Heap::deleteSourceProviderCaches()
1144 {
1145 GCPHASE(DeleteSourceProviderCaches);
1146 m_vm->clearSourceProviderCaches();
1147 }
1148
1149 void Heap::notifyIncrementalSweeper()
1150 {
1151 GCPHASE(NotifyIncrementalSweeper);
1152 if (m_operationInProgress != FullCollection)
1153 return;
1154 m_sweeper->startSweeping(m_blockSnapshot);
1155 }
1156
1157 void Heap::rememberCurrentlyExecutingCodeBlocks()
1158 {
1159 GCPHASE(RememberCurrentlyExecutingCodeBlocks);
1160 m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
1161 }
1162
1163 void Heap::resetAllocators()
1164 {
1165 GCPHASE(ResetAllocators);
1166 m_objectSpace.resetAllocators();
1167 }
1168
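// Worked example (LargeHeap on 8 GB of RAM): if a full collection leaves a
// 40 MB heap, proportionalHeapSize() doubles it, so m_maxHeapSize becomes
// max(32 MB, 80 MB) = 80 MB and m_maxEdenSize becomes 40 MB. Each Eden
// collection then grows m_maxHeapSize by however much survived the young
// generation, and once the Eden budget drops below a third of the total heap
// budget, the next collection is forced to be a full one.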
1169 void Heap::updateAllocationLimits()
1170 {
1171 GCPHASE(UpdateAllocationLimits);
1172 size_t currentHeapSize = sizeAfterCollect();
1173 if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
1174 HeapStatistics::exitWithFailure();
1175
1176 if (m_operationInProgress == FullCollection) {
1177 // To avoid pathological GC churn in very small and very large heaps, we set
1178 // the new allocation limit based on the current size of the heap, with a
1179 // fixed minimum.
1180 m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
1181 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1182 m_sizeAfterLastFullCollect = currentHeapSize;
1183 m_bytesAbandonedSinceLastFullCollect = 0;
1184 } else {
1185 ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
1186 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1187 m_sizeAfterLastEdenCollect = currentHeapSize;
1188 double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
1189 double minEdenToOldGenerationRatio = 1.0 / 3.0;
1190 if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
1191 m_shouldDoFullCollection = true;
1192 m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
1193 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1194 if (m_fullActivityCallback) {
1195 ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
1196 m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
1197 }
1198 }
1199
1200 m_sizeAfterLastCollect = currentHeapSize;
1201 m_bytesAllocatedThisCycle = 0;
1202
1203 if (Options::logGC())
1204 dataLog(currentHeapSize / 1024, " kb, ");
1205 }
1206
1207 void Heap::didFinishCollection(double gcStartTime)
1208 {
1209 GCPHASE(FinishingCollection);
1210 double gcEndTime = WTF::monotonicallyIncreasingTime();
1211 if (m_operationInProgress == FullCollection)
1212 m_lastFullGCLength = gcEndTime - gcStartTime;
1213 else
1214 m_lastEdenGCLength = gcEndTime - gcStartTime;
1215
1216 if (Options::recordGCPauseTimes())
1217 HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
1218 RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
1219
1220 m_operationInProgress = NoOperation;
1221 JAVASCRIPTCORE_GC_END();
1222
1223 if (Options::useZombieMode())
1224 zombifyDeadObjects();
1225
1226 if (Options::objectsAreImmortal())
1227 markDeadObjects();
1228
1229 if (Options::showObjectStatistics())
1230 HeapStatistics::showObjectStatistics(this);
1231
1232 if (Options::logGC() == GCLogging::Verbose)
1233 GCLogging::dumpObjectGraph(this);
1234 }
1235
1236 void Heap::resumeCompilerThreads()
1237 {
1238 #if ENABLE(DFG_JIT)
1239 GCPHASE(ResumeCompilerThreads);
1240 for (auto worklist : m_suspendedCompilerWorklists)
1241 worklist->resumeAllThreads();
1242 m_suspendedCompilerWorklists.clear();
1243 #endif
1244 }
1245
1246 void Heap::markDeadObjects()
1247 {
1248 HeapIterationScope iterationScope(*this);
1249 m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
1250 }
1251
1252 void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
1253 {
1254 m_fullActivityCallback = activityCallback;
1255 }
1256
1257 void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
1258 {
1259 m_edenActivityCallback = activityCallback;
1260 }
1261
1262 GCActivityCallback* Heap::fullActivityCallback()
1263 {
1264 return m_fullActivityCallback.get();
1265 }
1266
1267 GCActivityCallback* Heap::edenActivityCallback()
1268 {
1269 return m_edenActivityCallback.get();
1270 }
1271
1272 void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
1273 {
1274 m_sweeper = sweeper;
1275 }
1276
1277 IncrementalSweeper* Heap::sweeper()
1278 {
1279 return m_sweeper.get();
1280 }
1281
1282 void Heap::setGarbageCollectionTimerEnabled(bool enable)
1283 {
1284 if (m_fullActivityCallback)
1285 m_fullActivityCallback->setEnabled(enable);
1286 if (m_edenActivityCallback)
1287 m_edenActivityCallback->setEnabled(enable);
1288 }
1289
1290 void Heap::didAllocate(size_t bytes)
1291 {
1292 if (m_edenActivityCallback)
1293 m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
1294 m_bytesAllocatedThisCycle += bytes;
1295 }
1296
1297 bool Heap::isValidAllocation(size_t)
1298 {
1299 if (!isValidThreadState(m_vm))
1300 return false;
1301
1302 if (m_operationInProgress != NoOperation)
1303 return false;
1304
1305 return true;
1306 }
1307
1308 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
1309 {
1310 WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
1311 }
1312
1313 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
1314 {
1315 HandleSlot slot = handle.slot();
1316 Finalizer finalizer = reinterpret_cast<Finalizer>(context);
1317 finalizer(slot->asCell());
1318 WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
1319 }
1320
1321 void Heap::addCompiledCode(ExecutableBase* executable)
1322 {
1323 m_compiledCode.append(executable);
1324 }
1325
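// Zombification scribbles a sentinel (zombifiedBits) over every word of each
// dead cell, preserving the zapped header word so we can still tell whether
// the destructor ran. Stale pointers into dead objects then show up as
// obviously bogus values, which makes use-after-free bugs easier to diagnose.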
1326 class Zombify : public MarkedBlock::VoidFunctor {
1327 public:
1328 void operator()(JSCell* cell)
1329 {
1330 void** current = reinterpret_cast<void**>(cell);
1331
1332 // We want to maintain zapped-ness because that's how we know if we've called
1333 // the destructor.
1334 if (cell->isZapped())
1335 current++;
1336
1337 void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
1338 for (; current < limit; current++)
1339 *current = zombifiedBits;
1340 }
1341 };
1342
1343 void Heap::zombifyDeadObjects()
1344 {
1345 // Sweep now because destructors will crash once we're zombified.
1346 {
1347 SamplingRegion samplingRegion("Garbage Collection: Sweeping");
1348 DelayedReleaseScope delayedReleaseScope(m_objectSpace);
1349 m_objectSpace.zombifySweep();
1350 }
1351 HeapIterationScope iterationScope(*this);
1352 m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
1353 }
1354
1355 void Heap::flushWriteBarrierBuffer(JSCell* cell)
1356 {
1357 #if ENABLE(GGC)
1358 m_writeBarrierBuffer.flush(*this);
1359 m_writeBarrierBuffer.add(cell);
1360 #else
1361 UNUSED_PARAM(cell);
1362 #endif
1363 }
1364
1365 bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
1366 {
1367 #if ENABLE(GGC)
1368 if (Options::alwaysDoFullCollection())
1369 return true;
1370
1371 switch (requestedCollectionType) {
1372 case EdenCollection:
1373 return false;
1374 case FullCollection:
1375 return true;
1376 case AnyCollection:
1377 return m_shouldDoFullCollection;
1378 default:
1379 RELEASE_ASSERT_NOT_REACHED();
1380 return false;
1381 }
1382 RELEASE_ASSERT_NOT_REACHED();
1383 return false;
1384 #else
1385 UNUSED_PARAM(requestedCollectionType);
1386 return true;
1387 #endif
1388 }
1389
1390 } // namespace JSC