/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlineMethods.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "Interpreter.h"
#include "JSGlobalData.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Tracing.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/CurrentTime.h>

using namespace std;
using namespace JSC;

namespace JSC {

namespace {

#if CPU(X86) || CPU(X86_64)
static const size_t largeHeapSize = 16 * 1024 * 1024;
#else
static const size_t largeHeapSize = 8 * 1024 * 1024;
#endif
static const size_t smallHeapSize = 512 * 1024;

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

// Accumulates wall-clock time for one named GC phase and reports the total,
// average, and extrema when the program exits.
struct GCTimer {
    GCTimer(const char* name)
        : m_time(0)
        , m_min(100000000)
        , m_max(0)
        , m_count(0)
        , m_name(name)
    {
    }
    ~GCTimer()
    {
        if (!m_count)
            return; // Avoid dividing by zero for a phase that never ran.
        dataLog("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min * 1000, m_max * 1000);
    }
    double m_time;
    double m_min;
    double m_max;
    size_t m_count;
    const char* m_name;
};

// RAII helper: times the enclosing scope and folds the result into a GCTimer.
struct GCTimerScope {
    GCTimerScope(GCTimer* timer)
        : m_timer(timer)
        , m_start(WTF::currentTime())
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::currentTime() - m_start;
        if (delta < m_timer->m_min)
            m_timer->m_min = delta;
        if (delta > m_timer->m_max)
            m_timer->m_max = delta;
        m_timer->m_count++;
        m_timer->m_time += delta;
    }
    GCTimer* m_timer;
    double m_start;
};

// Tracks a named sample stream (e.g. visited-cell counts) and reports the
// total, average, and extrema at exit.
struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(static_cast<size_t>(-1)) // Start at SIZE_MAX so the first sample always sets the minimum.
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        if (!m_count)
            return; // Avoid dividing by zero for a counter that never counted.
        dataLog("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
    }
    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif
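
// Usage sketch (illustrative note, not part of the original file): with
// ENABLE(GC_LOGGING) set, GCPHASE(SomeName) declares a block-scope static
// GCTimer plus a GCTimerScope that times the enclosing block, e.g.
//
//     {
//         GCPHASE(VisitMachineRoots); // stats printed at process exit
//         visitor.append(machineThreadRoots);
//         visitor.donateAndDrain();
//     }
//
// With logging disabled, all three macros compile away to empty statements.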

static size_t heapSizeForHint(HeapSize heapSize)
{
    if (heapSize == LargeHeap)
        return largeHeapSize;
    ASSERT(heapSize == SmallHeap);
    return smallHeapSize;
}

static inline bool isValidSharedInstanceThreadState(JSGlobalData* globalData)
{
    return globalData->apiLock().currentThreadIsHoldingLock();
}

static inline bool isValidThreadState(JSGlobalData* globalData)
{
    if (globalData->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (globalData->isSharedInstance() && !isValidSharedInstanceThreadState(globalData))
        return false;

    return true;
}

class CountFunctor {
public:
    typedef size_t ReturnType;

    CountFunctor();
    void count(size_t);
    ReturnType returnValue();

private:
    ReturnType m_count;
};

inline CountFunctor::CountFunctor()
    : m_count(0)
{
}

inline void CountFunctor::count(size_t count)
{
    m_count += count;
}

inline CountFunctor::ReturnType CountFunctor::returnValue()
{
    return m_count;
}
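
// Illustrative note: the functors below follow the protocol expected by
// MarkedSpace::forEachBlock / forEachCell: operator() is applied to each
// block or cell, and returnValue() is handed back to the caller. For example,
// Heap::objectCount() (below) is just
//
//     m_objectSpace.forEachBlock<MarkCount>();
//
// which sums MarkedBlock::markCount() across every block in the heap.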

struct ClearMarks : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void ClearMarks::operator()(MarkedBlock* block)
{
    block->clearMarks();
}

struct Sweep : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void Sweep::operator()(MarkedBlock* block)
{
    block->sweep();
}

struct MarkCount : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void MarkCount::operator()(MarkedBlock* block)
{
    count(block->markCount());
}

struct Size : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Size::operator()(MarkedBlock* block)
{
    count(block->markCount() * block->cellSize());
}

struct Capacity : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Capacity::operator()(MarkedBlock* block)
{
    count(block->capacity());
}

struct Count : CountFunctor {
    void operator()(JSCell*);
};

inline void Count::operator()(JSCell*)
{
    count(1);
}

struct CountIfGlobalObject : CountFunctor {
    void operator()(JSCell*);
};

inline void CountIfGlobalObject::operator()(JSCell* cell)
{
    if (!cell->isObject())
        return;
    if (!asObject(cell)->isGlobalObject())
        return;
    count(1);
}

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace

Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
    : m_heapSize(heapSize)
    , m_minBytesPerCycle(heapSizeForHint(heapSize))
    , m_sizeAfterLastCollect(0)
    , m_bytesAllocatedLimit(m_minBytesPerCycle)
    , m_bytesAllocated(0)
    , m_bytesAbandoned(0)
    , m_operationInProgress(NoOperation)
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_markListSet(0)
    , m_machineThreads(this)
    , m_sharedData(globalData)
    , m_slotVisitor(m_sharedData)
    , m_weakSet(this)
    , m_handleSet(globalData)
    , m_isSafeToCollect(false)
    , m_globalData(globalData)
    , m_lastGCLength(0)
    , m_lastCodeDiscardTime(WTF::currentTime())
    , m_activityCallback(DefaultGCActivityCallback::create(this))
{
    m_storageSpace.init();
}

Heap::~Heap()
{
    delete m_markListSet;

    m_objectSpace.shrink();
    m_storageSpace.freeAllBlocks();

    ASSERT(!size());
    ASSERT(!capacity());
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The JSGlobalData is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(m_operationInProgress == NoOperation);

    // FIXME: Make this a release-mode crash once we're sure no one's doing this.
    if (size_t size = m_protectedValues.size())
        WTFLogAlways("ERROR: JavaScriptCore heap deallocated while %lu values were still protected", static_cast<unsigned long>(size));

    m_weakSet.finalizeAll();
    canonicalizeCellLivenessData();
    clearMarks();
    sweep();
    m_globalData->smallStrings.finalizeSmallStrings();

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
    m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not enough: in some cases those objects can pile up and use
    // large amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage-collected languages, most values
    // are either very short-lived temporaries or have extremely long lifetimes, so
    // if a large value survives one garbage collection there is not much point in
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    if (shouldCollect())
        collect(DoNotSweep);
}
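
// Illustrative example (hypothetical caller, not from this file): a cell
// backed by a large out-of-line buffer would report that buffer's size so the
// collector accounts for memory it cannot see directly:
//
//     void MyCell::finishCreation(JSGlobalData& globalData, size_t bufferSize)
//     {
//         Base::finishCreation(globalData);
//         globalData.heap.reportExtraMemoryCost(bufferSize);
//     }
//
// The inline reportExtraMemoryCost() in Heap.h funnels unusually large costs
// into this slow case.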

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.10 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}
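
// Worked example (illustrative): if the previous collection left 10MB live,
// m_sizeAfterLastCollect is 10MB, so a call here credits 0.10 * 10MB = 1MB to
// m_bytesAbandoned. didAbandon() below folds that into the allocation total
// reported to the GC activity callback, so the next collection is scheduled
// as if an extra 1MB had already been allocated.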

void Heap::didAbandon(size_t bytes)
{
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
    m_bytesAbandoned += bytes;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_globalData->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_globalData->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}
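
// Usage sketch (illustrative): protect()/unprotect() back the C API's
// JSValueProtect/JSValueUnprotect. A protected cell is treated as a GC root
// via markProtectedObjects() below:
//
//     heap.protect(value);   // value's cell now survives collections...
//     // ... use value across operations that may trigger a GC ...
//     heap.unprotect(value); // ...until its protect count drops back to zero.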

void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
{
    m_dfgCodeBlocks.jettison(codeBlock);
}

void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->first);
}

void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair>*> VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline RegisterFile& Heap::registerFile()
{
    return m_globalData->interpreter->registerFile();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_globalData));
    ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
    registerFile().gatherConservativeRoots(registerFileRoots);
    size_t registerFileRootCount = registerFileRoots.size();
    JSCell** registerRoots = registerFileRoots.roots();
    for (size_t i = 0; i < registerFileRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
}

void Heap::markRoots(bool fullGC)
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherRegisterFileRoots);
        registerFile().gatherConservativeRoots(registerFileRoots, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_globalData->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

#if ENABLE(GGC)
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(ClearMarks);
        clearMarks();
    }

    m_storageSpace.startedCopying();
    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

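    // Descriptive note (added for clarity): within a ParallelModeEnabler scope,
    // donateAndDrain() drains this visitor's local mark stack while donating
    // surplus cells to the shared work pool; with ENABLE(PARALLEL_GC), helper
    // threads drain that pool concurrently.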
    {
        ParallelModeEnabler enabler(visitor);
#if ENABLE(GGC)
        {
            size_t dirtyCellCount = dirtyCells.size();
            GCPHASE(VisitDirtyCells);
            GCCOUNTER(DirtyCellCount, dirtyCellCount);
            for (size_t i = 0; i < dirtyCellCount; i++) {
                heapRootVisitor.visitChildren(dirtyCells[i]);
                visitor.donateAndDrain();
            }
        }
#endif

        if (m_globalData->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
                m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        {
            GCPHASE(VisitMachineRoots);
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitRegisterFileRoots);
            visitor.append(registerFileRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_globalData->exception) {
            GCPHASE(MarkingException);
            heapRootVisitor.visit(&m_globalData->exception);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(VisitStrongHandles);
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(HandleStack);
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(TraceCodeBlocks);
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            visitor.donateAndDrain();
        }

#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        while (true) {
            m_weakSet.visitLiveWeakImpls(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }
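
    // Descriptive note (added for clarity): the loop above is a fixpoint.
    // Visiting live weak handles can mark new objects, which can in turn make
    // more weak handles live, so we re-scan until a pass leaves the visitor's
    // mark stack empty.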

    {
        GCPHASE(VisitingDeadWeakHandles);
        m_weakSet.visitDeadWeakImpls(heapRootVisitor);
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    visitor.doneCopying();
    visitor.reset();
    m_sharedData.reset();
    m_storageSpace.doneCopying();
}

void Heap::clearMarks()
{
    m_objectSpace.forEachBlock<ClearMarks>();
}

void Heap::sweep()
{
    m_objectSpace.forEachBlock<Sweep>();
}

size_t Heap::objectCount()
{
    return m_objectSpace.forEachBlock<MarkCount>();
}

size_t Heap::size()
{
    return m_objectSpace.forEachBlock<Size>() + m_storageSpace.size();
}

size_t Heap::capacity()
{
    return m_objectSpace.forEachBlock<Capacity>() + m_storageSpace.capacity();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return m_objectSpace.forEachCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return m_objectSpace.forEachCell<RecordType>();
}

void Heap::discardAllCompiledCode()
{
    // If JavaScript is running, it's not safe to recompile, since we'll end
    // up throwing away code that is live on the stack.
    if (m_globalData->dynamicGlobalObject)
        return;

    for (FunctionExecutable* current = m_functions.head(); current; current = current->next())
        current->discardCode();
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(DoSweep);
}

static const double minute = 60.0;

void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(globalData()->apiLock().currentThreadIsHoldingLock());
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    if (m_activityCallback)
        m_activityCallback->willCollect();

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        discardAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

#if ENABLE(GGC)
    bool fullGC = sweepToggle == DoSweep;
    if (!fullGC)
        fullGC = (capacity() > 4 * m_sizeAfterLastCollect);
#else
    bool fullGC = true;
#endif
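
    // Illustrative note: with GGC enabled, an explicit DoSweep request always
    // forces a full collection; otherwise a young-generation (dirty-cell) pass
    // is used until capacity outgrows 4x the size left by the last collection.
    // For example, 8MB surviving the last GC escalates to a full GC once
    // capacity exceeds 32MB.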
    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }

    markRoots(fullGC);

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(FinalizeWeakHandles);
        m_weakSet.sweep();
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(ResetAllocator);
        resetAllocators();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        sweep();
        m_objectSpace.shrink();
        m_weakSet.shrink();
        m_bytesAbandoned = 0;
    }

    // To avoid pathological GC churn in large heaps, we set the new allocation
    // limit to be the current size of the heap. This heuristic is a bit
    // arbitrary. Using the current size of the heap after this collection
    // gives us a 2X multiplier, which is a 1:1 (heap size : new bytes
    // allocated) proportion, and seems to work well in benchmarks.
    size_t newSize = size();
    if (fullGC) {
        m_sizeAfterLastCollect = newSize;
        m_bytesAllocatedLimit = max(newSize, m_minBytesPerCycle);
    }
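
    // Worked example (illustrative): if 8MB survives a full collection,
    // m_bytesAllocatedLimit becomes 8MB, so the mutator can allocate roughly
    // another 8MB before the next collection and the heap peaks near 16MB,
    // the "2X multiplier" described above.
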
    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;
    if (m_operationInProgress != Collection)
        CRASH();
    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();
}

void Heap::canonicalizeCellLivenessData()
{
    m_objectSpace.canonicalizeCellLivenessData();
}

void Heap::resetAllocators()
{
    m_objectSpace.resetAllocators();
    m_weakSet.resetAllocator();
}

void Heap::setActivityCallback(GCActivityCallback* activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback;
}

void Heap::didAllocate(size_t bytes)
{
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
    m_bytesAllocated += bytes;
}

bool Heap::isValidAllocation(size_t bytes)
{
    if (!isValidThreadState(m_globalData))
        return false;

    if (bytes > MarkedSpace::maxCellSize)
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}

void Heap::addFunctionExecutable(FunctionExecutable* executable)
{
    m_functions.append(executable);
}

void Heap::removeFunctionExecutable(FunctionExecutable* executable)
{
    m_functions.remove(executable);
}

} // namespace JSC