/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "VM.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Operations.h"
#include "Tracing.h"
#include "UnlinkedCodeBlock.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>

using namespace std;
using namespace JSC;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

struct GCTimer {
    GCTimer(const char* name)
        : m_time(0)
        , m_min(100000000)
        , m_max(0)
        , m_count(0)
        , m_name(name)
    {
    }
    ~GCTimer()
    {
        dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min * 1000, m_max * 1000);
    }
    double m_time;
    double m_min;
    double m_max;
    size_t m_count;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer)
        : m_timer(timer)
        , m_start(WTF::currentTime())
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::currentTime() - m_start;
        if (delta < m_timer->m_min)
            m_timer->m_min = delta;
        if (delta > m_timer->m_max)
            m_timer->m_max = delta;
        m_timer->m_count++;
        m_timer->m_time += delta;
    }
    GCTimer* m_timer;
    double m_start;
};

struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_count, m_total / m_count, m_min, m_max);
    }
    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif
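
// A minimal usage sketch (the names below are hypothetical, not part of this
// file): with ENABLE(GC_LOGGING) set, GCPHASE defines a function-local static
// GCTimer plus a GCTimerScope that times the enclosing block, and GCCOUNTER
// records one sample into a static GCCounter; both dump their statistics from
// their destructors at exit. Without GC_LOGGING, all three macros are no-ops.
//
//     {
//         GCPHASE(ExamplePhase);      // times this block as "ExamplePhase"
//         GCCOUNTER(ExampleStat, 42); // records one sample of value 42
//     }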

static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}

static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}
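
// Worked example (illustrative numbers, 8GB of RAM): a 1GB heap is under
// ramSize / 4, so it may double to 2GB; a 3GB heap falls between ramSize / 4
// and ramSize / 2, so it may grow to 4.5GB; a 5GB heap may only grow to
// 6.25GB. Growth slows as the heap occupies a larger share of RAM.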

static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->apiLock().currentThreadIsHoldingLock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}

struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    void operator()(JSCell* cell) {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace
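
// These functors plug into the heap's iteration templates. For instance,
// objectTypeCounts() below runs RecordType over every live cell via
// m_objectSpace.forEachLiveCell<RecordType>() and returns the resulting
// TypeCountSet, while protectedObjectCount() applies Count to each protected
// cell through forEachProtectedCell<Count>().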

Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_bytesAllocatedLimit(m_minBytesPerCycle)
    , m_bytesAllocated(0)
    , m_bytesAbandoned(0)
    , m_operationInProgress(NoOperation)
    , m_blockAllocator()
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_handleSet(vm)
    , m_isSafeToCollect(false)
    , m_vm(vm)
    , m_lastGCLength(0)
    , m_lastCodeDiscardTime(WTF::currentTime())
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_sweeper(IncrementalSweeper::create(this))
{
    m_storageSpace.init();
}

Heap::~Heap()
{
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->dynamicGlobalObject);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
    m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    if (shouldCollect())
        collect(DoNotSweep);
}
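
// A hypothetical caller: a cell backed by a large out-of-line allocation would
// report that allocation's size so the collector accounts for memory it cannot
// see, e.g.
//
//     void* backingStore = fastMalloc(largeSize);
//     vm.heap.reportExtraMemoryCost(largeSize); // may schedule a collection
//
// reportExtraMemoryCost() is the inline fast path in Heap.h; it forwards only
// unusually large costs to this slow case.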

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.10 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}

void Heap::didAbandon(size_t bytes)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#else
    m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#endif // PLATFORM(IOS)
    m_bytesAbandoned += bytes;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}
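
// Illustrative use: API-level roots normally go through the gcProtect() /
// gcUnprotect() helpers (runtime/Protect.h), which forward here so that a
// protected cell stays alive until every protect is balanced, e.g.
//
//     gcProtect(value);   // value's cell becomes a GC root
//     ...
//     gcUnprotect(value); // drops its count in m_protectedValues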

void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
{
    m_dfgCodeBlocks.jettison(codeBlock);
}

void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->key);
}

void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::canonicalizeCellLivenessData()
{
    m_objectSpace.canonicalizeCellLivenessData();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
}

void Heap::markRoots()
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::currentTime();
#endif

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(
            stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_vm->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

    {
        GCPHASE(clearMarks);
        m_objectSpace.clearMarks();
    }

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);

        if (m_vm->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_vm->codeBlocksBeingCompiled.size(); i++)
                m_vm->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        m_vm->smallStrings.visitStrongReferences(visitor);

        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_vm->exception) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(&m_vm->exception);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }

#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }
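
    // (The loop above is a fixpoint: visiting weak sets can mark previously
    // unreached objects, which can in turn keep more weak handles alive, so we
    // repeat until the visitor has nothing left to drain.)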

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime);
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}

void Heap::copyBackingStores()
{
    m_storageSpace.startedCopying();
    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}

size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return m_objectSpace.forEachLiveCell<RecordType>();
}

void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->dynamicGlobalObject)
        return;

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    m_dfgCodeBlocks.clearMarks();
    m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
}

void Heap::deleteUnmarkedCompiledCode()
{
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }

    m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(DoSweep);
}

static double minute = 60.0;

void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(vm()->apiLock().currentThreadIsHoldingLock());
    RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    m_operationInProgress = Collection;

#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->willCollect();
#else
    m_activityCallback->willCollect();
#endif // PLATFORM(IOS)

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

    {
        GCPHASE(Canonicalize);
        m_objectSpace.canonicalizeCellLivenessData();
    }

    markRoots();

    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(finalizeSmallStrings);
        m_vm->smallStrings.finalizeSmallStrings();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_vm->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }

    size_t currentHeapSize = size();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;

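    // For example (illustrative numbers): if 100MB of objects survive this
    // collection on a machine with ample RAM, proportionalHeapSize() permits
    // the heap to grow to 200MB, so m_bytesAllocatedLimit becomes 100MB and
    // the next collection triggers after roughly another 100MB of allocation.
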
    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);
}

void Heap::markDeadObjects()
{
    m_objectSpace.forEachDeadCell<MarkObject>();
}

void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}

#if PLATFORM(IOS)
void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
{
    m_sweeper = sweeper;
}
#endif // PLATFORM(IOS)

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}

void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->setEnabled(enable);
#else
    activityCallback()->setEnabled(enable);
#endif // PLATFORM(IOS)
}

void Heap::didAllocate(size_t bytes)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#else
    m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#endif // PLATFORM(IOS)
    m_bytesAllocated += bytes;
}

bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
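
// Illustrative use (the callback is hypothetical): a client that must release
// an external resource when a cell dies, without keeping the cell alive, can
// register a finalizer:
//
//     static void cellDied(JSCell* cell) { /* release external resource */ }
//     ...
//     heap.addFinalizer(cell, cellDied);
//
// The Weak handle allocated above carries the function pointer as its context;
// when the cell is swept, finalize() invokes it and frees the handle.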

void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}

class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = reinterpret_cast<void*>(0xbbadbeef);
    }
};
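
// Zombify overwrites each dead cell's payload with WebKit's 0xbbadbeef
// scribble value, so a use-after-free of a swept object faults at a
// recognizable address instead of silently reading stale data.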

void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    m_objectSpace.sweep();
    m_objectSpace.forEachDeadCell<Zombify>();
}

} // namespace JSC