/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CopiedSpace.h"
#include "CopiedSpaceInlineMethods.h"
#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "Interpreter.h"
#include "JSGlobalData.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Tracing.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/CurrentTime.h>

using namespace std;
using namespace JSC;

namespace JSC {

namespace {

#if CPU(X86) || CPU(X86_64)
static const size_t largeHeapSize = 16 * 1024 * 1024;
#else
static const size_t largeHeapSize = 8 * 1024 * 1024;
#endif
static const size_t smallHeapSize = 512 * 1024;

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

struct GCTimer {
    GCTimer(const char* name)
        : m_time(0)
        , m_min(100000000)
        , m_max(0)
        , m_count(0)
        , m_name(name)
    {
    }
    ~GCTimer()
    {
        dataLog("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min * 1000, m_max * 1000);
    }
    double m_time;
    double m_min;
    double m_max;
    size_t m_count;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer)
        : m_timer(timer)
        , m_start(WTF::currentTime())
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::currentTime() - m_start;
        if (delta < m_timer->m_min)
            m_timer->m_min = delta;
        if (delta > m_timer->m_max)
            m_timer->m_max = delta;
        m_timer->m_count++;
        m_timer->m_time += delta;
    }
    GCTimer* m_timer;
    double m_start;
};

struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLog("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
    }
    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif

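// A minimal usage sketch of the logging macros above, assuming
// ENABLE(GC_LOGGING); the phase and counter names here are hypothetical:
//
//     void Heap::somePhase()
//     {
//         GCPHASE(SomePhase); // times this scope into a static "SomePhase" GCTimer
//         GCCOUNTER(SomePhaseItems, itemCount); // accumulates a per-call count
//     }
//
// Each GCTimer/GCCounter prints its total, average, min, and max from its
// destructor at process exit (hence the -Wexit-time-destructors pragma).
// Note the averages divide by m_count, so the printed stats assume each
// global was exercised at least once.
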
static size_t heapSizeForHint(HeapSize heapSize)
{
    if (heapSize == LargeHeap)
        return largeHeapSize;
    ASSERT(heapSize == SmallHeap);
    return smallHeapSize;
}

static inline bool isValidSharedInstanceThreadState(JSGlobalData* globalData)
{
    return globalData->apiLock().currentThreadIsHoldingLock();
}

static inline bool isValidThreadState(JSGlobalData* globalData)
{
    if (globalData->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (globalData->isSharedInstance() && !isValidSharedInstanceThreadState(globalData))
        return false;

    return true;
}

class CountFunctor {
public:
    typedef size_t ReturnType;

    CountFunctor();
    void count(size_t);
    ReturnType returnValue();

private:
    ReturnType m_count;
};

inline CountFunctor::CountFunctor()
    : m_count(0)
{
}

inline void CountFunctor::count(size_t count)
{
    m_count += count;
}

inline CountFunctor::ReturnType CountFunctor::returnValue()
{
    return m_count;
}

struct ClearMarks : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void ClearMarks::operator()(MarkedBlock* block)
{
    block->clearMarks();
}

struct Sweep : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void Sweep::operator()(MarkedBlock* block)
{
    block->sweep();
}

struct MarkCount : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void MarkCount::operator()(MarkedBlock* block)
{
    count(block->markCount());
}

struct Size : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Size::operator()(MarkedBlock* block)
{
    count(block->markCount() * block->cellSize());
}

struct Capacity : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Capacity::operator()(MarkedBlock* block)
{
    count(block->capacity());
}

struct Count : public CountFunctor {
    void operator()(JSCell*);
};

inline void Count::operator()(JSCell*)
{
    count(1);
}

struct CountIfGlobalObject : CountFunctor {
    void operator()(JSCell*);
};

inline void CountIfGlobalObject::operator()(JSCell* cell)
{
    if (!cell->isObject())
        return;
    if (!asObject(cell)->isGlobalObject())
        return;
    count(1);
}

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}
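
// A sketch of how these functors are consumed by the Heap query methods
// further down: MarkedSpace::forEachBlock/forEachCell apply a functor to
// every block or live cell and return functor.returnValue(). For example,
// Heap::size() behaves roughly like this (the plumbing is simplified; the
// template overloads used below construct the functor themselves):
//
//     Size functor;
//     m_objectSpace.forEachBlock(functor); // adds markCount * cellSize per block
//     return functor.returnValue() + m_storageSpace.size();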

} // anonymous namespace

Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
    : m_heapSize(heapSize)
    , m_minBytesPerCycle(heapSizeForHint(heapSize))
    , m_sizeAfterLastCollect(0)
    , m_bytesAllocatedLimit(m_minBytesPerCycle)
    , m_bytesAllocated(0)
    , m_bytesAbandoned(0)
    , m_operationInProgress(NoOperation)
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_markListSet(0)
    , m_machineThreads(this)
    , m_sharedData(globalData)
    , m_slotVisitor(m_sharedData)
    , m_weakSet(this)
    , m_handleSet(globalData)
    , m_isSafeToCollect(false)
    , m_globalData(globalData)
    , m_lastGCLength(0)
    , m_lastCodeDiscardTime(WTF::currentTime())
    , m_activityCallback(DefaultGCActivityCallback::create(this))
{
    m_storageSpace.init();
}

Heap::~Heap()
{
    delete m_markListSet;

    m_objectSpace.shrink();
    m_storageSpace.freeAllBlocks();

    ASSERT(!size());
    ASSERT(!capacity());
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The JSGlobalData is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(m_operationInProgress == NoOperation);

    // FIXME: Make this a release-mode crash once we're sure no one's doing this.
    if (size_t size = m_protectedValues.size())
        WTFLogAlways("ERROR: JavaScriptCore heap deallocated while %ld values were still protected", static_cast<unsigned long>(size));

    m_weakSet.finalizeAll();
    canonicalizeCellLivenessData();
    clearMarks();
    sweep();
    m_globalData->smallStrings.finalizeSmallStrings();

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
    m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // consume large amounts of memory without ever triggering a GC. So we track these
    // extra memory costs. Only unusually large objects are noted, and we only keep
    // track of this extra cost until the next GC. In garbage collected languages,
    // most values are either very short lived temporaries, or have extremely long
    // lifetimes. So if a large value survives one garbage collection, there is not
    // much point to collecting more frequently as long as it stays alive.

    didAllocate(cost);
    if (shouldCollect())
        collect(DoNotSweep);
}

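// A sketch of the intended client pattern (the wrapper class here is
// hypothetical): an object that owns a large out-of-heap buffer reports that
// buffer's size, so collection frequency accounts for memory the marked
// heap cannot see. The reportExtraMemoryCost() wrapper (declared in Heap.h)
// forwards unusually large costs to the slow case above.
//
//     void MyWrapper::setBackingStore(PassRefPtr<SharedBuffer> buffer)
//     {
//         m_buffer = buffer;
//         globalData->heap.reportExtraMemoryCost(m_buffer->size());
//     }
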
void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.10 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}
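
// For example (illustrative numbers only): if the last collection left 40MB
// live, reportAbandonedObjectGraph() charges 4MB of fictitious allocation,
// moving the heap 4MB closer to the m_bytesAllocatedLimit collection trigger.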

void Heap::didAbandon(size_t bytes)
{
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
    m_bytesAbandoned += bytes;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_globalData->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_globalData->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

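// A usage sketch (the calling code is illustrative): protect() pins a value
// so it survives collections until a balancing unprotect(). m_protectedValues
// is a counted set, so nested protects require equally many unprotects.
//
//     heap.protect(value);   // value is now treated as a GC root
//     // ... run code that may trigger a collection ...
//     heap.unprotect(value); // returns false if value isn't a cell
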
void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
{
    m_dfgCodeBlocks.jettison(codeBlock);
}

void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->first);
}

void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline RegisterFile& Heap::registerFile()
{
    return m_globalData->interpreter->registerFile();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_globalData));
    ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
    registerFile().gatherConservativeRoots(registerFileRoots);
    size_t registerFileRootCount = registerFileRoots.size();
    JSCell** registerRoots = registerFileRoots.roots();
    for (size_t i = 0; i < registerFileRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
}

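// markRoots is the tracing phase of collection. In outline (summarizing the
// code below, not adding behavior): gather conservative roots from machine
// threads, the register file, and (with DFG_JIT) scratch buffers; clear mark
// bits, or with GGC gather dirty cells for a young-generation pass; then
// visit the strong roots in parallel mode, donating and draining marking
// work; finally resolve weak references to a fixed point and finish copying.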
void Heap::markRoots(bool fullGC)
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherRegisterFileRoots);
        registerFile().gatherConservativeRoots(registerFileRoots, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_globalData->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

#if ENABLE(GGC)
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        clearMarks();
    }

    m_storageSpace.startedCopying();
    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);
#if ENABLE(GGC)
        {
            size_t dirtyCellCount = dirtyCells.size();
            GCPHASE(VisitDirtyCells);
            GCCOUNTER(DirtyCellCount, dirtyCellCount);
            for (size_t i = 0; i < dirtyCellCount; i++) {
                heapRootVisitor.visitChildren(dirtyCells[i]);
                visitor.donateAndDrain();
            }
        }
#endif

        if (m_globalData->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
                m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        {
            GCPHASE(VisitMachineRoots);
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitRegisterFileRoots);
            visitor.append(registerFileRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_globalData->exception) {
            GCPHASE(MarkingException);
            heapRootVisitor.visit(&m_globalData->exception);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(VisitStrongHandles);
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(HandleStack);
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(TraceCodeBlocks);
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            visitor.donateAndDrain();
        }

#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph. Visiting a live weak
    // reference can mark new objects, which can in turn make more weak
    // references live, so we iterate to a fixed point: loop until a visit
    // pass generates no further marking work.
    {
        GCPHASE(VisitingLiveWeakHandles);
        while (true) {
            m_weakSet.visitLiveWeakImpls(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    {
        GCPHASE(VisitingDeadWeakHandles);
        m_weakSet.visitDeadWeakImpls(heapRootVisitor);
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    visitor.doneCopying();
    visitor.reset();
    m_sharedData.reset();
    m_storageSpace.doneCopying();
}

void Heap::clearMarks()
{
    m_objectSpace.forEachBlock<ClearMarks>();
}

void Heap::sweep()
{
    m_objectSpace.forEachBlock<Sweep>();
}

size_t Heap::objectCount()
{
    return m_objectSpace.forEachBlock<MarkCount>();
}

size_t Heap::size()
{
    return m_objectSpace.forEachBlock<Size>() + m_storageSpace.size();
}

size_t Heap::capacity()
{
    return m_objectSpace.forEachBlock<Capacity>() + m_storageSpace.capacity();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return m_objectSpace.forEachCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return m_objectSpace.forEachCell<RecordType>();
}

void Heap::discardAllCompiledCode()
{
    // If JavaScript is running, it's not safe to recompile, since we'll end
    // up throwing away code that is live on the stack.
    if (m_globalData->dynamicGlobalObject)
        return;

    for (FunctionExecutable* current = m_functions.head(); current; current = current->next())
        current->discardCode();
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(DoSweep);
}

static double minute = 60.0;

void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(globalData()->apiLock().currentThreadIsHoldingLock());
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    if (m_activityCallback)
        m_activityCallback->willCollect();

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        discardAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

#if ENABLE(GGC)
    bool fullGC = sweepToggle == DoSweep;
    if (!fullGC)
        fullGC = (capacity() > 4 * m_sizeAfterLastCollect);
#else
    bool fullGC = true;
#endif
    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }

    markRoots(fullGC);

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(FinalizeWeakHandles);
        m_weakSet.sweep();
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(ResetAllocator);
        resetAllocators();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        sweep();
        m_objectSpace.shrink();
        m_weakSet.shrink();
        m_bytesAbandoned = 0;
    }

    // To avoid pathological GC churn in large heaps, we set the new allocation
    // limit to be the current size of the heap. This heuristic
    // is a bit arbitrary. Using the current size of the heap after this
    // collection gives us a 2X multiplier, which is a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
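    // For example (illustrative numbers): if a full collection leaves a 10MB
    // live heap, the next collection fires only after another 10MB has been
    // allocated, at which point the heap is at most ~20MB, hence "2X".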
    size_t newSize = size();
    if (fullGC) {
        m_sizeAfterLastCollect = newSize;
        m_bytesAllocatedLimit = max(newSize, m_minBytesPerCycle);
    }
    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;
    if (m_operationInProgress != Collection)
        CRASH();
    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();
}

void Heap::canonicalizeCellLivenessData()
{
    m_objectSpace.canonicalizeCellLivenessData();
}

void Heap::resetAllocators()
{
    m_objectSpace.resetAllocators();
    m_weakSet.resetAllocator();
}

void Heap::setActivityCallback(GCActivityCallback* activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback;
}

void Heap::didAllocate(size_t bytes)
{
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
    m_bytesAllocated += bytes;
}

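// Note on the accounting above: didAllocate() and didAbandon() report
// m_bytesAllocated + m_bytesAbandoned to the activity callback before adding
// the new bytes, so the callback observes the running total as of the
// previous event. Both counters are reset by collect(): m_bytesAllocated
// after every collection, m_bytesAbandoned only on a sweeping collection.
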
bool Heap::isValidAllocation(size_t bytes)
{
    if (!isValidThreadState(m_globalData))
        return false;

    if (bytes > MarkedSpace::maxCellSize)
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}

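// A usage sketch (the finalizer function is hypothetical): addFinalizer()
// registers a weak handle whose death runs the callback exactly once;
// FinalizerOwner::finalize() above then deallocates that handle.
//
//     static void didFinalizeMyCell(JSCell* cell)
//     {
//         // release external resources owned by |cell|
//     }
//     ...
//     heap.addFinalizer(cell, didFinalizeMyCell);
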
void Heap::addFunctionExecutable(FunctionExecutable* executable)
{
    m_functions.append(executable);
}

void Heap::removeFunctionExecutable(FunctionExecutable* executable)
{
    m_functions.remove(executable);
}

} // namespace JSC