/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MarkStack_h
#define MarkStack_h

#include "CopiedSpace.h"
#include "HandleTypes.h"
#include "Options.h"
#include "UnconditionalFinalizer.h"
#include "VTableSpectrum.h"
#include "WeakReferenceHarvester.h"
#include <wtf/HashMap.h>
#include <wtf/HashSet.h>
#include <wtf/Vector.h>
#include <wtf/Noncopyable.h>
#include <wtf/OSAllocator.h>
#include <wtf/PageBlock.h>

namespace JSC {

class ConservativeRoots;
class JSGlobalData;
class ParallelModeEnabler;
class SlotVisitor;
template<typename T> class WriteBarrierBase;
template<typename T> class JITWriteBarrier;

struct MarkStackSegment {
    MarkStackSegment* m_previous;
#if !ASSERT_DISABLED
    size_t m_top;
#endif

    const JSCell** data()
    {
        return bitwise_cast<const JSCell**>(this + 1);
    }

    static size_t capacityFromSize(size_t size)
    {
        return (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*);
    }

    static size_t sizeFromCapacity(size_t capacity)
    {
        return sizeof(MarkStackSegment) + capacity * sizeof(const JSCell*);
    }
};
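
// Illustrative arithmetic only (the numbers below are assumptions, not values
// taken from this header): cell pointers are stored immediately after the
// MarkStackSegment header, which is why data() returns this + 1 cast to
// const JSCell**. If a segment were carved out of a 4096-byte block on a
// 64-bit build with a 16-byte header, then
//     capacityFromSize(4096) == (4096 - 16) / 8 == 510
// cell pointers would fit, and sizeFromCapacity(510) == 16 + 510 * 8 == 4096
// reverses the calculation.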

class MarkStackSegmentAllocator {
public:
    MarkStackSegmentAllocator();
    ~MarkStackSegmentAllocator();

    MarkStackSegment* allocate();
    void release(MarkStackSegment*);

private:
    MarkStackSegment* m_nextFreeSegment;
};

class MarkStackArray {
public:
    MarkStackArray(MarkStackSegmentAllocator&);

    void append(const JSCell*);

    bool canRemoveLast();
    const JSCell* removeLast();

    bool isEmpty();

    bool canDonateSomeCells(); // Returns false if you should definitely not call donateSomeCellsTo().
    bool donateSomeCellsTo(MarkStackArray& other); // Returns true if some cells were donated.

    void stealSomeCellsFrom(MarkStackArray& other);

    size_t size();

private:
    MarkStackSegment* m_topSegment;

    JS_EXPORT_PRIVATE void expand();

    MarkStackSegmentAllocator& m_allocator;

    size_t m_segmentCapacity;
    size_t m_top;
    size_t m_numberOfPreviousSegments;

    size_t postIncTop()
    {
        size_t result = m_top++;
        ASSERT(result == m_topSegment->m_top++);
        return result;
    }

    size_t preDecTop()
    {
        size_t result = --m_top;
        ASSERT(result == --m_topSegment->m_top);
        return result;
    }

    void setTopForFullSegment()
    {
        ASSERT(m_topSegment->m_top == m_segmentCapacity);
        m_top = m_segmentCapacity;
    }

    void setTopForEmptySegment()
    {
        ASSERT(!m_topSegment->m_top);
        m_top = 0;
    }

    size_t top()
    {
        ASSERT(m_top == m_topSegment->m_top);
        return m_top;
    }

#if ASSERT_DISABLED
    void validatePrevious() { }
#else
    void validatePrevious()
    {
        size_t count = 0;
        for (MarkStackSegment* current = m_topSegment->m_previous; current; current = current->m_previous)
            count++;
        ASSERT(count == m_numberOfPreviousSegments);
    }
#endif
};
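
// A minimal usage sketch (hypothetical driver code, for illustration only;
// in practice MarkStackArray is driven by the marking machinery during
// garbage collection, and visitChildren below stands in for that work):
//
//     MarkStackSegmentAllocator allocator;
//     MarkStackArray worklist(allocator);
//     worklist.append(cell);                    // push a cell discovered during marking
//     while (worklist.canRemoveLast())
//         visitChildren(worklist.removeLast()); // pop in LIFO order from the current segment
//
// Parallel markers additionally use canDonateSomeCells()/donateSomeCellsTo()
// to push surplus work onto a shared stack and stealSomeCellsFrom() to take
// work back when they run dry.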

class MarkStackThreadSharedData {
public:
    MarkStackThreadSharedData(JSGlobalData*);
    ~MarkStackThreadSharedData();

private:
    friend class MarkStack;
    friend class SlotVisitor;

#if ENABLE(PARALLEL_GC)
    void markingThreadMain();
    static void markingThreadStartFunc(void* heap);
#endif

    JSGlobalData* m_globalData;
    CopiedSpace* m_copiedSpace;

    MarkStackSegmentAllocator m_segmentAllocator;

    Vector<ThreadIdentifier> m_markingThreads;

    Mutex m_markingLock;
    ThreadCondition m_markingCondition;
    MarkStackArray m_sharedMarkStack;
    unsigned m_numberOfActiveParallelMarkers;
    bool m_parallelMarkersShouldExit;

    Mutex m_opaqueRootsLock;
    HashSet<void*> m_opaqueRoots;

    ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
    ListableHandler<UnconditionalFinalizer>::List m_unconditionalFinalizers;
};
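
// How these pieces fit together (a summary based on the declarations above,
// not a normative description): one MarkStackThreadSharedData is shared by
// every marker. Each marking thread owns a MarkStack whose local
// MarkStackArray holds its private worklist; when PARALLEL_GC is enabled,
// threads donate surplus cells to m_sharedMarkStack and steal from it when
// idle, coordinating through the shared marking lock and condition, while
// opaque roots found locally are eventually merged into the shared
// m_opaqueRoots set under m_opaqueRootsLock.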

class MarkStack {
    WTF_MAKE_NONCOPYABLE(MarkStack);
    friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly.

public:
    MarkStack(MarkStackThreadSharedData&);
    ~MarkStack();

    void append(ConservativeRoots&);

    template<typename T> void append(JITWriteBarrier<T>*);
    template<typename T> void append(WriteBarrierBase<T>*);
    void appendValues(WriteBarrierBase<Unknown>*, size_t count);

    template<typename T> void appendUnbarrieredPointer(T**);

    void addOpaqueRoot(void*);
    bool containsOpaqueRoot(void*);
    int opaqueRootCount();

    bool isEmpty() { return m_stack.isEmpty(); }

    size_t visitCount() const { return m_visitCount; }

#if ENABLE(SIMPLE_HEAP_PROFILING)
    VTableSpectrum m_visitedTypeCounts;
#endif

    void addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
    {
        m_shared.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
    }

    void addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
    {
        m_shared.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
    }

protected:
    JS_EXPORT_PRIVATE static void validate(JSCell*);

    void append(JSValue*);
    void append(JSValue*, size_t count);
    void append(JSCell**);

    void internalAppend(JSCell*);
    void internalAppend(JSValue);

    JS_EXPORT_PRIVATE void mergeOpaqueRoots();

    void mergeOpaqueRootsIfNecessary()
    {
        if (m_opaqueRoots.isEmpty())
            return;
        mergeOpaqueRoots();
    }

    void mergeOpaqueRootsIfProfitable()
    {
        if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold)
            return;
        mergeOpaqueRoots();
    }

    MarkStackArray m_stack;
    HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.

    bool m_isCheckingForDefaultMarkViolation;
    bool m_isDraining;

    friend class ParallelModeEnabler;

    size_t m_visitCount;
    bool m_isInParallelMode;

    MarkStackThreadSharedData& m_shared;
};

inline MarkStack::MarkStack(MarkStackThreadSharedData& shared)
    : m_stack(shared.m_segmentAllocator)
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_shared(shared)
{
}

inline MarkStack::~MarkStack()
{
    ASSERT(m_stack.isEmpty());
}

inline void MarkStack::addOpaqueRoot(void* root)
{
#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers == 1) {
        // Put directly into the shared HashSet.
        m_shared.m_opaqueRoots.add(root);
        return;
    }
    // Put into the local set, but merge with the shared one every once in
    // a while to make sure that the local sets don't grow too large.
    mergeOpaqueRootsIfProfitable();
    m_opaqueRoots.add(root);
#else
    m_opaqueRoots.add(root);
#endif
}

inline bool MarkStack::containsOpaqueRoot(void* root)
{
    ASSERT(!m_isInParallelMode);
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty());
    return m_shared.m_opaqueRoots.contains(root);
#else
    return m_opaqueRoots.contains(root);
#endif
}

inline int MarkStack::opaqueRootCount()
{
    ASSERT(!m_isInParallelMode);
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty());
    return m_shared.m_opaqueRoots.size();
#else
    return m_opaqueRoots.size();
#endif
}
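
// A hedged usage sketch (the owner type and method names below are
// hypothetical, not declared in this header): code that keeps objects alive
// through data structures the collector cannot see registers an opaque root
// while marking, then asks afterwards whether that root was reached:
//
//     void MyWeakOwner::visitChildren(MarkStack& visitor)
//     {
//         visitor.addOpaqueRoot(m_externalContainer);
//     }
//
//     bool MyWeakOwner::isStillReachable(MarkStack& visitor)
//     {
//         return visitor.containsOpaqueRoot(m_externalContainer); // only valid outside parallel mode
//     }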

inline void MarkStackArray::append(const JSCell* cell)
{
    if (m_top == m_segmentCapacity)
        expand();
    m_topSegment->data()[postIncTop()] = cell;
}

inline bool MarkStackArray::canRemoveLast()
{
    return !!m_top;
}

inline const JSCell* MarkStackArray::removeLast()
{
    return m_topSegment->data()[preDecTop()];
}

inline bool MarkStackArray::isEmpty()
{
    if (m_top)
        return false;
    if (m_topSegment->m_previous) {
        ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity);
        return false;
    }
    return true;
}

inline bool MarkStackArray::canDonateSomeCells()
{
    size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
    // See if we have enough cells to warrant donation.
    if (m_top <= numberOfCellsToKeep) {
        // This indicates that we might not want to donate anything; check if we have
        // another full segment. If not, then don't donate.
        if (!m_topSegment->m_previous)
            return false;
        ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity);
    }
    return true;
}

inline size_t MarkStackArray::size()
{
    return m_top + m_segmentCapacity * m_numberOfPreviousSegments;
}

ALWAYS_INLINE void MarkStack::append(JSValue* slot, size_t count)
{
    for (size_t i = 0; i < count; ++i) {
        JSValue& value = slot[i];
        if (!value)
            continue;
        internalAppend(value);
    }
}

template<typename T>
inline void MarkStack::appendUnbarrieredPointer(T** slot)
{
    ASSERT(slot);
    JSCell* cell = *slot;
    if (cell)
        internalAppend(cell);
}

ALWAYS_INLINE void MarkStack::append(JSValue* slot)
{
    ASSERT(slot);
    internalAppend(*slot);
}

ALWAYS_INLINE void MarkStack::append(JSCell** slot)
{
    ASSERT(slot);
    internalAppend(*slot);
}

ALWAYS_INLINE void MarkStack::internalAppend(JSValue value)
{
    ASSERT(value);
    if (!value.isCell())
        return;
    internalAppend(value.asCell());
}
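
// ParallelModeEnabler is an RAII helper that flips a MarkStack into parallel
// mode for the duration of a scope. A minimal usage sketch (the drain call is
// a hypothetical placeholder, not declared in this header):
//
//     {
//         ParallelModeEnabler enabler(visitor);
//         drainInParallel(visitor); // work that may donate and steal cells
//     } // parallel mode is cleared here, even on early exit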

class ParallelModeEnabler {
public:
    ParallelModeEnabler(MarkStack& stack)
        : m_stack(stack)
    {
        ASSERT(!m_stack.m_isInParallelMode);
        m_stack.m_isInParallelMode = true;
    }

    ~ParallelModeEnabler()
    {
        ASSERT(m_stack.m_isInParallelMode);
        m_stack.m_isInParallelMode = false;
    }

private:
    MarkStack& m_stack;
};

} // namespace JSC

#endif // MarkStack_h