/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkStack.h"

#include "CopiedSpace.h"
#include "CopiedSpaceInlineMethods.h"
#include "ConservativeRoots.h"
#include "Heap.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSObject.h"
#include "Options.h"
#include "ScopeChain.h"
#include "Structure.h"
#include "WriteBarrier.h"
#include <wtf/MainThread.h>

namespace JSC {

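// This file implements the segmented mark stacks used by the collector's
// marking phase, plus the machinery that lets several marking threads share
// work: MarkStackSegmentAllocator recycles fixed-size stack segments,
// MarkStackArray is a segmented LIFO of cells to visit, and
// MarkStackThreadSharedData / SlotVisitor coordinate parallel draining of
// those stacks when PARALLEL_GC is enabled.
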
MarkStackSegmentAllocator::MarkStackSegmentAllocator()
    : m_nextFreeSegment(0)
{
}

MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
{
    shrinkReserve();
}

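// Segments are recycled through a free list guarded by m_lock: allocate()
// pops a previously released segment if one is available and only falls back
// to reserving fresh memory from the OS when the free list is empty.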
MarkStackSegment* MarkStackSegmentAllocator::allocate()
{
    {
        MutexLocker locker(m_lock);
        if (m_nextFreeSegment) {
            MarkStackSegment* result = m_nextFreeSegment;
            m_nextFreeSegment = result->m_previous;
            return result;
        }
    }

    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize));
}

void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
{
    MutexLocker locker(m_lock);
    segment->m_previous = m_nextFreeSegment;
    m_nextFreeSegment = segment;
}

void MarkStackSegmentAllocator::shrinkReserve()
{
    MarkStackSegment* segments;
    {
        MutexLocker locker(m_lock);
        segments = m_nextFreeSegment;
        m_nextFreeSegment = 0;
    }
    while (segments) {
        MarkStackSegment* toFree = segments;
        segments = segments->m_previous;
        OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize);
    }
}

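// MarkStackArray keeps its entries in a chain of fixed-size segments. The top
// segment is the one being pushed to and popped from; full segments are linked
// behind it through m_previous, and m_numberOfPreviousSegments counts them so
// that whole segments can be handed between stacks cheaply.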
MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
    : m_allocator(allocator)
    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize))
    , m_top(0)
    , m_numberOfPreviousSegments(0)
{
    m_topSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    m_topSegment->m_top = 0;
#endif
    m_topSegment->m_previous = 0;
}

MarkStackArray::~MarkStackArray()
{
    ASSERT(!m_topSegment->m_previous);
    m_allocator.release(m_topSegment);
}

void MarkStackArray::expand()
{
    ASSERT(m_topSegment->m_top == m_segmentCapacity);

    m_numberOfPreviousSegments++;

    MarkStackSegment* nextSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    nextSegment->m_top = 0;
#endif
    nextSegment->m_previous = m_topSegment;
    m_topSegment = nextSegment;
    setTopForEmptySegment();
    validatePrevious();
}

bool MarkStackArray::refill()
{
    validatePrevious();
    if (top())
        return true;
    MarkStackSegment* toFree = m_topSegment;
    MarkStackSegment* previous = m_topSegment->m_previous;
    if (!previous)
        return false;
    ASSERT(m_numberOfPreviousSegments);
    m_numberOfPreviousSegments--;
    m_topSegment = previous;
    m_allocator.release(toFree);
    setTopForFullSegment();
    validatePrevious();
    return true;
}

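// Work balancing between a thread's private mark stack and the shared stack:
// donateSomeCellsTo() pushes surplus work onto another MarkStackArray, while
// stealSomeCellsFrom() pulls work back when a thread runs dry.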
bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // Fast check: see if the other mark stack already has enough segments.
    if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
        return false;

    size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);

    // Looks like we should donate! Give the other mark stack all of our
    // previous segments, and then top it off.
    MarkStackSegment* previous = m_topSegment->m_previous;
    while (previous) {
        ASSERT(m_numberOfPreviousSegments);

        MarkStackSegment* current = previous;
        previous = current->m_previous;

        current->m_previous = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current;

        m_numberOfPreviousSegments--;
        other.m_numberOfPreviousSegments++;
    }
    ASSERT(!m_numberOfPreviousSegments);
    m_topSegment->m_previous = 0;
    validatePrevious();
    other.validatePrevious();

    // Now top off. We want to keep at a minimum numberOfCellsToKeep, but if
    // we really have a lot of work, we give up half.
    if (m_top > numberOfCellsToKeep * 2)
        numberOfCellsToKeep = m_top / 2;
    while (m_top > numberOfCellsToKeep)
        other.append(removeLast());

    return true;
}

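// Stealing prefers to take a whole full segment from the other stack when one
// is available; otherwise it copies over a bounded number of individual cells.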
void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // If other has an entire segment, steal it and return.
    if (other.m_topSegment->m_previous) {
        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);

        // First remove a segment from other.
        MarkStackSegment* current = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current->m_previous;
        other.m_numberOfPreviousSegments--;

        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);

        // Now add it to this.
        current->m_previous = m_topSegment->m_previous;
        m_topSegment->m_previous = current;
        m_numberOfPreviousSegments++;

        validatePrevious();
        other.validatePrevious();
        return;
    }

    // Otherwise drain 1/Nth of the shared array where N is the number of
    // workers, or Options::minimumNumberOfCellsToKeep, whichever is bigger.
    size_t numberOfCellsToSteal = std::max((size_t)Options::minimumNumberOfCellsToKeep, other.size() / Options::numberOfGCMarkers);
    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
        append(other.removeLast());
}

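// When PARALLEL_GC is enabled, additional marking threads run
// markingThreadMain(): each one builds a SlotVisitor over the shared data and
// drains work from the shared mark stack in SlaveDrain mode until told to exit.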
#if ENABLE(PARALLEL_GC)
void MarkStackThreadSharedData::markingThreadMain()
{
    WTF::registerGCThread();
    SlotVisitor slotVisitor(*this);
    ParallelModeEnabler enabler(slotVisitor);
    slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
}

void MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
{
    static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
}
#endif

MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
    : m_globalData(globalData)
    , m_copiedSpace(&globalData->heap.m_storageSpace)
    , m_sharedMarkStack(m_segmentAllocator)
    , m_numberOfActiveParallelMarkers(0)
    , m_parallelMarkersShouldExit(false)
{
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
        m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
        ASSERT(m_markingThreads.last());
    }
#endif
}

MarkStackThreadSharedData::~MarkStackThreadSharedData()
{
#if ENABLE(PARALLEL_GC)
    // Destroy our marking threads.
    {
        MutexLocker locker(m_markingLock);
        m_parallelMarkersShouldExit = true;
        m_markingCondition.broadcast();
    }
    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
        waitForThreadCompletion(m_markingThreads[i]);
#endif
}

void MarkStackThreadSharedData::reset()
{
    ASSERT(!m_numberOfActiveParallelMarkers);
    ASSERT(!m_parallelMarkersShouldExit);
    ASSERT(m_sharedMarkStack.isEmpty());

#if ENABLE(PARALLEL_GC)
    m_segmentAllocator.shrinkReserve();
    m_opaqueRoots.clear();
#else
    ASSERT(m_opaqueRoots.isEmpty());
#endif

    m_weakReferenceHarvesters.removeAll();
}

void MarkStack::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
}

void MarkStack::append(ConservativeRoots& conservativeRoots)
{
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(roots[i]);
}

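// visitChildren() dispatches to the cell's marking routine. Strings, final
// objects and arrays are special-cased with direct calls; everything else goes
// through the cell's method table.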
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_visitedTypeCounts.count(cell);
#endif

    ASSERT(Heap::isMarked(cell));

    if (isJSString(cell)) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSFinalObject(cell)) {
        JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSArray(cell)) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

void SlotVisitor::donateSlow()
{
    // Refuse to donate if shared has more entries than I do.
    if (m_shared.m_sharedMarkStack.size() > m_stack.size())
        return;
    MutexLocker locker(m_shared.m_markingLock);
    if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
        // Only wake up threads if the shared stack is big enough; otherwise assume that
        // it's more profitable for us to just scan this ourselves later.
        if (m_shared.m_sharedMarkStack.size() >= Options::sharedStackWakeupThreshold)
            m_shared.m_markingCondition.broadcast();
    }
}

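// drain() empties this visitor's private mark stack. In parallel mode it
// periodically pauses (every minimumNumberOfScansBetweenRebalance cells) to
// consider donating work to the shared stack; in single-threaded mode it just
// visits cells until the stack is empty.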
void SlotVisitor::drain()
{
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}

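// drainFromShared() is the parallel work loop. Both the master and the slave
// threads decrement the active-marker count while they wait on the shared
// stack; termination is detected when no marker is active and the shared stack
// is empty, and the condition variable broadcast wakes the others so they can
// observe that state.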
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers);

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers > 1;
#else
    ASSERT(Options::numberOfGCMarkers == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                        // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
                        m_shared.m_markingCondition.broadcast();
                        return;
                    }

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                        doneCopying();
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }

                // Is the VM exiting? If so, exit this thread.
                if (m_shared.m_parallelMarkersShouldExit) {
                    doneCopying();
                    return;
                }
            }

            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

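// Opaque roots collected by this visitor are merged into the shared set under
// m_opaqueRootsLock, so that subsequent weak-reference decisions see the roots
// found by every marking thread.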
void MarkStack::mergeOpaqueRoots()
{
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        MutexLocker locker(m_shared.m_opaqueRootsLock);
        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
        HashSet<void*>::iterator end = m_opaqueRoots.end();
        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
            m_shared.m_opaqueRoots.add(*iter);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::startCopying()
{
    ASSERT(!m_copyBlock);
    if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
        CRASH();
}

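// allocateNewSpace() decides where a cell's backing store goes during the
// copying phase: oversize storage is pinned in place, already-pinned storage
// is left alone, and everything else is bump-allocated out of this visitor's
// borrowed CopiedBlock, with a fresh block borrowed when the current one fills up.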
void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
{
    if (CopiedSpace::isOversize(bytes)) {
        m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(ptr));
        return 0;
    }

    if (m_shared.m_copiedSpace->isPinned(ptr))
        return 0;

    // The only time it's possible to have a null copy block is if we have just started copying.
    if (!m_copyBlock)
        startCopying();

    if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) {
        // We don't need to lock across these two calls because the master thread won't
        // call doneCopying() because this thread is considered active.
        m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
        if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
            CRASH();
    }
    return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
}

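// copyAndAppend() relocates a backing store that contains JSValues: it copies
// each value into the new storage while marking it, copies the prefix that
// precedes the values with memcpy, and simply marks the values in place if the
// storage could not be moved.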
void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
{
    void* oldPtr = *ptr;
    void* newPtr = allocateNewSpace(oldPtr, bytes);
    if (newPtr) {
        size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));

        JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
        for (unsigned i = 0; i < length; i++) {
            JSValue& value = values[i];
            newValues[i] = value;
            if (!value)
                continue;
            internalAppend(value);
        }

        memcpy(newPtr, oldPtr, jsValuesOffset);
        *ptr = newPtr;
    } else
        append(values, length);
}

void SlotVisitor::doneCopying()
{
    if (!m_copyBlock)
        return;

    m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);

    m_copyBlock = 0;
}

void SlotVisitor::harvestWeakReferences()
{
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

void SlotVisitor::finalizeUnconditionalFinalizers()
{
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

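// Under GC_VALIDATION, validate() sanity-checks a cell before it is visited;
// otherwise it compiles to an empty stub.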
#if ENABLE(GC_VALIDATION)
void MarkStack::validate(JSCell* cell)
{
    if (!cell)
        CRASH();

    if (!cell->structure())
        CRASH();

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo())
        CRASH();
}
#else
void MarkStack::validate(JSCell*)
{
}
#endif

} // namespace JSC