/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "GCActivityCallback.h"
#include "Interpreter.h"
#include "JSGlobalData.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Tracing.h"
#include <algorithm>

#define COLLECT_ON_EVERY_SLOW_ALLOCATION 0

using namespace std;

namespace JSC {

const size_t minBytesPerCycle = 512 * 1024;

Heap::Heap(JSGlobalData* globalData)
    : m_operationInProgress(NoOperation)
    , m_markedSpace(globalData)
    , m_markListSet(0)
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_globalData(globalData)
    , m_machineThreads(this)
    , m_markStack(globalData->jsArrayVPtr)
    , m_handleHeap(globalData)
    , m_extraCost(0)
{
    m_markedSpace.setHighWaterMark(minBytesPerCycle);
    (*m_activityCallback)();
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

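// A sketch of the teardown ordering the assertion above implies (the caller
// shown is illustrative; the Heap's owner is expected to call destroy()
// during its own shutdown):
//
//     heap.destroy(); // sweep, finalize weak handles, release m_globalData
//     // ~Heap() then runs with m_globalData already cleared
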
void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(m_operationInProgress == NoOperation);

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

#if ENABLE(JIT)
    m_globalData->jitStubs->clearHostFunctionStubs();
#endif

    delete m_markListSet;
    m_markListSet = 0;
    m_markedSpace.clearMarks();
    m_handleHeap.finalizeWeakHandles();
    m_markedSpace.destroy();

    m_globalData = 0;
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    if (m_extraCost > maxExtraCost && m_extraCost > m_markedSpace.highWaterMark() / 2)
        collectAllGarbage();
    m_extraCost += cost;
}

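// A sketch of a typical caller (hypothetical; the accessor path is
// illustrative, not quoted from a real call site): an object backed by a
// large out-of-line buffer reports that buffer's size, and the inline
// reportExtraMemoryCost() wrapper funnels unusually large costs here:
//
//     void* backingStore = fastMalloc(byteSize);
//     globalData->heap.reportExtraMemoryCost(byteSize);
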
void* Heap::allocateSlowCase(size_t bytes)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(bytes <= MarkedSpace::maxCellSize);
    ASSERT(m_operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_SLOW_ALLOCATION
    collectAllGarbage();
    ASSERT(m_operationInProgress == NoOperation);
#endif

    reset(DoNotSweep);

    m_operationInProgress = Allocation;
    void* result = m_markedSpace.allocate(bytes);
    m_operationInProgress = NoOperation;

    ASSERT(result);
    return result;
}

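// Note on the slow path above: it is reached when the fast inline allocation
// path cannot satisfy the request, and reset(DoNotSweep) runs a full mark
// phase, so every slow allocation pays for a collection before retrying
// against the freshly reset marked space. Defining
// COLLECT_ON_EVERY_SLOW_ALLOCATION (see the top of this file) additionally
// forces a full sweeping collection here, which is useful for flushing out
// GC bugs.
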
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

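// m_protectedValues is a counted set (ProtectCountSet), so protect()/
// unprotect() pairs nest per cell: a cell is only truly released once every
// matching protect() has been balanced. A sketch of typical use through the
// wrappers in runtime/Protect.h (helper names assumed from that header):
//
//     gcProtect(value);   // pin value's cell across collections
//     ...
//     gcUnprotect(value); // balance the pin
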
void Heap::markProtectedObjects(HeapRootVisitor& heapRootMarker)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootMarker.mark(&it->first);
}

void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootMarker)
{
    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootMarker.mark(&vectorIt->first);
        }
    }
}

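// These helpers exist because a sort (e.g. Array.prototype.sort copying its
// values into a temporary WTF::Vector of ValueStringPairs) holds JSValues in
// malloc'd storage the collector does not otherwise scan. Bracketing the sort
// with pushTempSortVector()/popTempSortVector() lets markTempSortVectors()
// treat the vector's contents as GC roots for the duration of the sort.
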
inline RegisterFile& Heap::registerFile()
{
    return m_globalData->interpreter->registerFile();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;
    ConservativeRoots registerFileRoots(this);
    registerFile().gatherConservativeRoots(registerFileRoots);
    size_t registerFileRootCount = registerFileRoots.size();
    JSCell** registerRoots = registerFileRoots.roots();
    for (size_t i = 0; i < registerFileRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
    m_operationInProgress = NoOperation;
}

void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    void* dummy;

    ASSERT(m_operationInProgress == NoOperation);
    if (m_operationInProgress != NoOperation)
        CRASH();

    m_operationInProgress = Collection;

    MarkStack& visitor = m_markStack;
    HeapRootVisitor heapRootMarker(visitor);

    // We gather conservative roots before clearing mark bits because
    // conservative gathering uses the mark bits from our last mark pass to
    // determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(this);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(this);
    registerFile().gatherConservativeRoots(registerFileRoots);

    m_markedSpace.clearMarks();

    visitor.append(machineThreadRoots);
    visitor.drain();

    visitor.append(registerFileRoots);
    visitor.drain();

    markProtectedObjects(heapRootMarker);
    visitor.drain();

    markTempSortVectors(heapRootMarker);
    visitor.drain();

    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootMarker, *m_markListSet);
    if (m_globalData->exception)
        heapRootMarker.mark(&m_globalData->exception);
    visitor.drain();

    m_handleHeap.markStrongHandles(heapRootMarker);
    visitor.drain();

    m_handleStack.mark(heapRootMarker);
    visitor.drain();

    // Mark the small strings cache as late as possible, since it will clear
    // itself if nothing else has marked it.
    // FIXME: Change the small strings cache to use Weak<T>.
    m_globalData->smallStrings.visitChildren(heapRootMarker);
    visitor.drain();

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = visitor.opaqueRootCount();
        m_handleHeap.markWeakHandles(heapRootMarker);
        visitor.drain();
    // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != visitor.opaqueRootCount());

    visitor.reset();

    m_operationInProgress = NoOperation;
}

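// The do/while above is a fixpoint iteration: marking weak handles can add
// new opaque roots (via each handle owner's reachability decision), and new
// opaque roots can in turn make further weak handles reachable. Schematically:
//
//     count = opaque root count
//     mark weak handles; drain the mark stack
//     repeat while the opaque root count changed
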
size_t Heap::objectCount() const
{
    return m_markedSpace.objectCount();
}

size_t Heap::size() const
{
    return m_markedSpace.size();
}

size_t Heap::capacity() const
{
    return m_markedSpace.capacity();
}

size_t Heap::globalObjectCount()
{
    return m_globalData->globalObjectCount;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = m_handleHeap.protectedGlobalObjectCount();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        if (it->first->isObject() && asObject(it->first)->isGlobalObject())
            count++;
    }

    return count;
}

size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}

class TypeCounter {
public:
    TypeCounter();
    void operator()(JSCell*);
    PassOwnPtr<TypeCountSet> take();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
    HashSet<JSCell*> m_cells;
};

inline TypeCounter::TypeCounter()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* TypeCounter::typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isGetterSetter())
        return "Getter-Setter";
    if (cell->isAPIValueWrapper())
        return "API wrapper";
    if (cell->isPropertyNameIterator())
        return "For-in iterator";
    if (const ClassInfo* info = cell->classInfo())
        return info->className;
    if (!cell->isObject())
        return "[empty cell]";
    return "Object";
}

inline void TypeCounter::operator()(JSCell* cell)
{
    // Count each cell only once, even if it is reported through multiple roots.
    if (!m_cells.add(cell).second)
        return;
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> TypeCounter::take()
{
    return m_typeCountSet.release();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    TypeCounter typeCounter;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        typeCounter(it->first);
    m_handleHeap.protectedObjectTypeCounts(typeCounter);

    return typeCounter.take();
}

void HandleHeap::protectedObjectTypeCounts(TypeCounter& typeCounter)
{
    Node* end = m_strongList.end();
    for (Node* node = m_strongList.begin(); node != end; node = node->next()) {
        JSValue value = *node->slot();
        if (value && value.isCell())
            typeCounter(value.asCell());
    }
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    TypeCounter typeCounter;
    forEach(typeCounter);
    return typeCounter.take();
}

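// A sketch of consuming one of these histograms (hypothetical caller;
// TypeCountSet is a counted set keyed by type name):
//
//     OwnPtr<TypeCountSet> counts = heap.objectTypeCounts();
//     TypeCountSet::const_iterator end = counts->end();
//     for (TypeCountSet::const_iterator it = counts->begin(); it != end; ++it)
//         printf("%7u %s\n", it->second, it->first);
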
void Heap::collectAllGarbage()
{
    m_markStack.setShouldUnlinkCalls(true);
    reset(DoSweep);
    m_markStack.setShouldUnlinkCalls(false);
}

void Heap::reset(SweepToggle sweepToggle)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();
    m_handleHeap.finalizeWeakHandles();

    JAVASCRIPTCORE_GC_MARKED();

    m_markedSpace.reset();
    m_extraCost = 0;

#if ENABLE(JSC_ZOMBIES)
    sweepToggle = DoSweep;
#endif

    if (sweepToggle == DoSweep) {
        m_markedSpace.sweep();
        m_markedSpace.shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t proportionalBytes = 2 * m_markedSpace.size();
    m_markedSpace.setHighWaterMark(max(proportionalBytes, minBytesPerCycle));

    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}

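// Worked example of the heuristic above: if 4MB of cells survive a
// collection, the high water mark becomes max(2 * 4MB, 512KB) = 8MB, so
// roughly another 4MB can be allocated before the next collection - a 1:1
// ratio of live data to new allocation, as the comment says. A heap with
// under 256KB of live data still gets the 512KB floor (minBytesPerCycle)
// rather than collecting continuously.
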
void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}

} // namespace JSC