/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "VM.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Operations.h"
#include "Tracing.h"
#include "UnlinkedCodeBlock.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>

using namespace std;
using namespace JSC;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
    _Pragma("clang diagnostic push") \
    _Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
    _Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
    static type name arguments; \
    _Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
    static type name arguments;
#endif // COMPILER(CLANG)

struct GCTimer {
    GCTimer(const char* name)
        : m_time(0)
        , m_min(100000000)
        , m_max(0)
        , m_count(0)
        , m_name(name)
    {
    }
    ~GCTimer()
    {
        dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min * 1000, m_max * 1000);
    }
    double m_time;
    double m_min;
    double m_max;
    size_t m_count;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer)
        : m_timer(timer)
        , m_start(WTF::currentTime())
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::currentTime() - m_start;
        if (delta < m_timer->m_min)
            m_timer->m_min = delta;
        if (delta > m_timer->m_max)
            m_timer->m_max = delta;
        m_timer->m_count++;
        m_timer->m_time += delta;
    }
    GCTimer* m_timer;
    double m_start;
};

struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
    }
    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif
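
// When GC_LOGGING is enabled, GCPHASE(Foo) expands to a function-local static
// GCTimer plus a stack-allocated GCTimerScope, so each textual phase accumulates
// total/min/max/average wall-clock time and dumps it from the GCTimer destructor
// at process exit. GCCOUNTER does the same for size_t samples. With GC_LOGGING
// off, all three macros compile away to empty statements.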

static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}

static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}
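
// Worked example (illustrative numbers only): with 8GB of RAM, a LargeHeap VM
// gets a 32MB floor from minHeapSize(). If a collection leaves 100MB live
// (less than ramSize / 4 = 2GB), the heap may grow to 200MB before the next
// collection; at 3GB live (between 1/4 and 1/2 of RAM) growth slows to 1.5x,
// i.e. 4.5GB; beyond half of RAM it slows further to 1.25x.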

static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->apiLock().currentThreadIsHoldingLock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}

struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    void operator()(JSCell* cell) {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}
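
// These functors are driven by the iteration templates on MarkedSpace and Heap.
// For example, Heap::objectTypeCounts() below calls
// m_objectSpace.forEachLiveCell<RecordType>(), which constructs a RecordType,
// applies its operator() to every live cell, and hands back returnValue().
// The CountFunctor-based functors (Count, CountIfGlobalObject) likewise return
// their accumulated count.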

} // anonymous namespace

Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_bytesAllocatedLimit(m_minBytesPerCycle)
    , m_bytesAllocated(0)
    , m_bytesAbandoned(0)
    , m_operationInProgress(NoOperation)
    , m_blockAllocator()
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_handleSet(vm)
    , m_isSafeToCollect(false)
    , m_vm(vm)
    , m_lastGCLength(0)
    , m_lastCodeDiscardTime(WTF::currentTime())
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_sweeper(IncrementalSweeper::create(this))
{
    m_storageSpace.init();
}

Heap::~Heap()
{
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->dynamicGlobalObject);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
    m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    if (shouldCollect())
        collect(DoNotSweep);
}
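
// Illustrative caller (hypothetical class, for exposition only): a cell that
// retains a large out-of-heap buffer reports that cost so the next collection is
// scheduled sooner. Clients call the inline Heap::reportExtraMemoryCost() fast
// path declared in Heap.h, which forwards sufficiently large costs to this slow
// case.
//
//     void ExternalBufferWrapper::finishCreation(VM& vm, size_t byteLength)
//     {
//         Base::finishCreation(vm);
//         vm.heap.reportExtraMemoryCost(byteLength);
//     }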

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.10 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}
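
// The "pretend allocation" is bookkeeping only: didAbandon() below bumps
// m_bytesAbandoned and reports m_bytesAllocated + m_bytesAbandoned to the GC
// activity callback, which is what hastens the next collection.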

void Heap::didAbandon(size_t bytes)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#else
    m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#endif // PLATFORM(IOS)
    m_bytesAbandoned += bytes;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}
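
// Protection is reference-counted: ProtectCountSet is a counted set, so a value
// protected N times must be unprotected N times before it becomes collectable.
// Clients typically reach these through the gcProtect()/gcUnprotect() helpers in
// runtime/Protect.h (and, indirectly, the C API's JSValueProtect/JSValueUnprotect),
// which is why both functions assert that the API lock is held.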

void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
{
    m_dfgCodeBlocks.jettison(codeBlock);
}

void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->key);
}

void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::canonicalizeCellLivenessData()
{
    m_objectSpace.canonicalizeCellLivenessData();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
}

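// Marking proceeds in three stages: (1) gather conservative roots (machine
// registers and stacks, the JS stack, and DFG scratch buffers) while the old
// mark bits are still valid, (2) clear marks and trace from every root category,
// draining in parallel where ENABLE(PARALLEL_GC) allows, and (3) iterate the
// weak sets to a fixed point, since visiting weak handles can discover newly
// live objects that in turn keep more weak handles alive.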
void Heap::markRoots()
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::currentTime();
#endif

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(
            stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_vm->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

    {
        GCPHASE(clearMarks);
        m_objectSpace.clearMarks();
    }

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);

        if (m_vm->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_vm->codeBlocksBeingCompiled.size(); i++)
                m_vm->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        m_vm->smallStrings.visitStrongReferences(visitor);

        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_vm->exception) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(&m_vm->exception);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }

#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime);
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}

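// After marking, the copied space is compacted: when shouldDoCopyPhase() decides
// a pass is worthwhile, the copy visitor evacuates surviving backing stores
// (butterflies and other out-of-line storage) into fresh CopiedBlocks; otherwise
// the space only performs the doneCopying() bookkeeping.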
void Heap::copyBackingStores()
{
    m_storageSpace.startedCopying();
    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}

size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return m_objectSpace.forEachLiveCell<RecordType>();
}

void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->dynamicGlobalObject)
        return;

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    m_dfgCodeBlocks.clearMarks();
    m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
}

void Heap::deleteUnmarkedCompiledCode()
{
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }

    m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(DoSweep);
}

static double minute = 60.0;

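// A full collection: discard old compiled code if it has been idle for a minute,
// canonicalize cell liveness data, mark, reap weak sets, snapshot the marked
// blocks for the incremental sweeper, copy backing stores, run finalizers,
// delete dead code, optionally sweep and shrink eagerly (DoSweep), and finally
// recompute the allocation limit that schedules the next collection.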
void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(vm()->apiLock().currentThreadIsHoldingLock());
    RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    m_operationInProgress = Collection;

#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->willCollect();
#else
    m_activityCallback->willCollect();
#endif // PLATFORM(IOS)

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

    {
        GCPHASE(Canonicalize);
        m_objectSpace.canonicalizeCellLivenessData();
    }

    markRoots();

    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(finalizeSmallStrings);
        m_vm->smallStrings.finalizeSmallStrings();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_vm->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }

    size_t currentHeapSize = size();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;
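    // Continuing the earlier sizing example: if 100MB survived this collection on
    // an 8GB machine, maxHeapSize is max(32MB, 200MB) = 200MB, so roughly another
    // 100MB may be allocated before the next collection is triggered.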

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);
}

void Heap::markDeadObjects()
{
    m_objectSpace.forEachDeadCell<MarkObject>();
}

void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}

#if PLATFORM(IOS)
void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
{
    m_sweeper = sweeper;
}
#endif // PLATFORM(IOS)

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}

void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->setEnabled(enable);
#else
    activityCallback()->setEnabled(enable);
#endif // PLATFORM(IOS)
}

void Heap::didAllocate(size_t bytes)
{
#if PLATFORM(IOS)
    if (m_activityCallback)
        m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#else
    m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
#endif // PLATFORM(IOS)
    m_bytesAllocated += bytes;
}

bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
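
// Illustrative registration (hypothetical wrapper class, for exposition only):
// a cell that owns a native resource can ask the heap to call back when the
// cell dies, instead of defining a destructor.
//
//     static void releaseNativeHandle(JSCell* cell)
//     {
//         static_cast<NativeHandleWrapper*>(cell)->release();
//     }
//
//     // ... at construction time:
//     vm.heap.addFinalizer(wrapper, releaseNativeHandle);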

void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}

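// Zombie mode (Options::useZombieMode()): rather than leaving dead cells as
// reusable free memory, overwrite them with the 0xbbadbeef pattern so stale
// references crash deterministically instead of silently reading recycled data.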
class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = reinterpret_cast<void*>(0xbbadbeef);
    }
};

void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    m_objectSpace.sweep();
    m_objectSpace.forEachDeadCell<Zombify>();
}

} // namespace JSC