// heap/Heap.cpp (from JavaScriptCore-903)
/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "GCActivityCallback.h"
#include "Interpreter.h"
#include "JSGlobalData.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Tracing.h"
#include <algorithm>

#define COLLECT_ON_EVERY_SLOW_ALLOCATION 0

using namespace std;

namespace JSC {

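// Lower bound on the number of bytes allocated between collections: it is the
// initial high water mark (set in the constructor below) and the floor applied
// when the mark is recomputed at the end of each collection in reset().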
const size_t minBytesPerCycle = 512 * 1024;

Heap::Heap(JSGlobalData* globalData)
    : m_operationInProgress(NoOperation)
    , m_markedSpace(globalData)
    , m_markListSet(0)
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_globalData(globalData)
    , m_machineThreads(this)
    , m_markStack(globalData->jsArrayVPtr)
    , m_handleHeap(globalData)
    , m_extraCost(0)
{
    m_markedSpace.setHighWaterMark(minBytesPerCycle);
    (*m_activityCallback)();
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(m_operationInProgress == NoOperation);

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

#if ENABLE(JIT)
    m_globalData->jitStubs->clearHostFunctionStubs();
#endif

    delete m_markListSet;
    m_markListSet = 0;
    m_markedSpace.clearMarks();
    m_handleHeap.finalizeWeakHandles();
    m_markedSpace.destroy();

    m_globalData = 0;
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
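    //
    // For example (a sketch, not a call site from this file): an object backed
    // by a large out-of-line buffer would report that buffer's size through
    // the inline reportExtraMemoryCost() fast path, which forwards unusually
    // large costs here:
    //
    //     void* backingStore = fastMalloc(largeSize);
    //     globalData->heap.reportExtraMemoryCost(largeSize);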

    if (m_extraCost > maxExtraCost && m_extraCost > m_markedSpace.highWaterMark() / 2)
        collectAllGarbage();
    m_extraCost += cost;
}

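// Slow path for the inline allocator: runs a collection (without sweeping) to
// replenish free cells, then performs the allocation in MarkedSpace.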
void* Heap::allocateSlowCase(size_t bytes)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(bytes <= MarkedSpace::maxCellSize);
    ASSERT(m_operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_SLOW_ALLOCATION
    collectAllGarbage();
    ASSERT(m_operationInProgress == NoOperation);
#endif

    reset(DoNotSweep);

    m_operationInProgress = Allocation;
    void* result = m_markedSpace.allocate(bytes);
    m_operationInProgress = NoOperation;

    ASSERT(result);
    return result;
}

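// protect() and unprotect() pin individual values so that they are treated as
// GC roots (see markProtectedObjects() below). Non-cell values are ignored,
// since only cells live in the heap. The underlying ProtectCountSet is a
// counted set, so nested protect()/unprotect() pairs balance correctly.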
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::markProtectedObjects(HeapRootVisitor& heapRootMarker)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootMarker.mark(&it->first);
}

void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

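// While a sort is in progress (e.g. Array.prototype.sort), the values being
// sorted live in temporary vectors the collector cannot otherwise see, so they
// are registered via pushTempSortVector()/popTempSortVector() and marked here.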
void Heap::markTempSortVectors(HeapRootVisitor& heapRootMarker)
{
    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootMarker.mark(&vectorIt->first);
        }
    }
}

inline RegisterFile& Heap::registerFile()
{
    return m_globalData->interpreter->registerFile();
}

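// Gathers the conservative roots visible in the register file (the JS call
// stack), marks them, and hands the set back to the caller. Unlike a full
// collection, this does not clear mark bits first.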
void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;
    ConservativeRoots registerFileRoots(this);
    registerFile().gatherConservativeRoots(registerFileRoots);
    size_t registerFileRootCount = registerFileRoots.size();
    JSCell** registerRoots = registerFileRoots.roots();
    for (size_t i = 0; i < registerFileRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
    m_operationInProgress = NoOperation;
}

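// The marking phase of a collection. Roots are visited in a deliberate order:
// conservative roots (machine stacks and the register file) are gathered
// before mark bits are cleared, explicit roots (protected values, temporary
// sort vectors, argument lists, the pending exception, strong handles, and the
// handle stack) are marked next, and weak handles are visited last, iterating
// to a fixed point so owners can consult the complete set of opaque roots.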
void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    void* dummy;

    ASSERT(m_operationInProgress == NoOperation);
    if (m_operationInProgress != NoOperation)
        CRASH();

    m_operationInProgress = Collection;

    MarkStack& visitor = m_markStack;
    HeapRootVisitor heapRootMarker(visitor);

    // We gather conservative roots before clearing mark bits because
    // conservative gathering uses the mark bits from our last mark pass to
    // determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(this);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(this);
    registerFile().gatherConservativeRoots(registerFileRoots);

    m_markedSpace.clearMarks();

    visitor.append(machineThreadRoots);
    visitor.drain();

    visitor.append(registerFileRoots);
    visitor.drain();

    markProtectedObjects(heapRootMarker);
    visitor.drain();

    markTempSortVectors(heapRootMarker);
    visitor.drain();

    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootMarker, *m_markListSet);
    if (m_globalData->exception)
        heapRootMarker.mark(&m_globalData->exception);
    visitor.drain();

    m_handleHeap.markStrongHandles(heapRootMarker);
    visitor.drain();

    m_handleStack.mark(heapRootMarker);
    visitor.drain();

    // Mark the small strings cache as late as possible, since it will clear
    // itself if nothing else has marked it.
    // FIXME: Change the small strings cache to use Weak<T>.
    m_globalData->smallStrings.visitChildren(heapRootMarker);
    visitor.drain();

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = visitor.opaqueRootCount();
        m_handleHeap.markWeakHandles(heapRootMarker);
        visitor.drain();
        // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != visitor.opaqueRootCount());

    visitor.reset();

    m_operationInProgress = NoOperation;
}

size_t Heap::objectCount() const
{
    return m_markedSpace.objectCount();
}

size_t Heap::size() const
{
    return m_markedSpace.size();
}

size_t Heap::capacity() const
{
    return m_markedSpace.capacity();
}

size_t Heap::globalObjectCount()
{
    return m_globalData->globalObjectCount;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = m_handleHeap.protectedGlobalObjectCount();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        if (it->first->isObject() && asObject(it->first)->isGlobalObject())
            count++;
    }

    return count;
}

size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}

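// TypeCounter tallies live cells by type name for the statistics functions
// below (protectedObjectTypeCounts() and objectTypeCounts()). The m_cells set
// deduplicates, so a cell reachable through multiple roots is counted once.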
class TypeCounter {
public:
    TypeCounter();
    void operator()(JSCell*);
    PassOwnPtr<TypeCountSet> take();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
    HashSet<JSCell*> m_cells;
};

inline TypeCounter::TypeCounter()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* TypeCounter::typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isGetterSetter())
        return "Getter-Setter";
    if (cell->isAPIValueWrapper())
        return "API wrapper";
    if (cell->isPropertyNameIterator())
        return "For-in iterator";
    if (const ClassInfo* info = cell->classInfo())
        return info->className;
    if (!cell->isObject())
        return "[empty cell]";
    return "Object";
}

inline void TypeCounter::operator()(JSCell* cell)
{
    if (!m_cells.add(cell).second)
        return;
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> TypeCounter::take()
{
    return m_typeCountSet.release();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    TypeCounter typeCounter;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        typeCounter(it->first);
    m_handleHeap.protectedObjectTypeCounts(typeCounter);

    return typeCounter.take();
}

void HandleHeap::protectedObjectTypeCounts(TypeCounter& typeCounter)
{
    Node* end = m_strongList.end();
    for (Node* node = m_strongList.begin(); node != end; node = node->next()) {
        JSValue value = *node->slot();
        if (value && value.isCell())
            typeCounter(value.asCell());
    }
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    TypeCounter typeCounter;
    forEach(typeCounter);
    return typeCounter.take();
}

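// Forces an immediate, synchronous collection, including a sweep. Call
// unlinking is enabled in the mark stack for the duration of the collection.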
void Heap::collectAllGarbage()
{
    m_markStack.setShouldUnlinkCalls(true);
    reset(DoSweep);
    m_markStack.setShouldUnlinkCalls(false);
}

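// The collection itself: mark live objects, finalize weak handles, reset the
// allocator, optionally sweep, and recompute the allocation high water mark.
// Despite its name, this is the entry point for every collection in this file:
// allocateSlowCase() calls it with DoNotSweep, collectAllGarbage() with DoSweep.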
void Heap::reset(SweepToggle sweepToggle)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();
    m_handleHeap.finalizeWeakHandles();

    JAVASCRIPTCORE_GC_MARKED();

    m_markedSpace.reset();
    m_extraCost = 0;

#if ENABLE(JSC_ZOMBIES)
    sweepToggle = DoSweep;
#endif

    if (sweepToggle == DoSweep) {
        m_markedSpace.sweep();
        m_markedSpace.shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
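    // For example, if 8MB of objects survive this collection, the high water
    // mark becomes 16MB, so roughly 8MB of new allocation happens before the
    // next collection, which is the 1:1 proportion described above.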
    size_t proportionalBytes = 2 * m_markedSpace.size();
    m_markedSpace.setHighWaterMark(max(proportionalBytes, minBytesPerCycle));

    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}

void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}

} // namespace JSC