+MarkStackSegmentAllocator::MarkStackSegmentAllocator()
+    : m_nextFreeSegment(0)
+{
+}
+
+MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
+{
+    shrinkReserve();
+}
+
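+// Pop a segment off the free list if one is available; otherwise commit a
+// fresh block of memory from the OS. The free list is inspected under the
+// lock, but the OS allocation happens outside it.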
+MarkStackSegment* MarkStackSegmentAllocator::allocate()
+{
+    {
+        MutexLocker locker(m_lock);
+        if (m_nextFreeSegment) {
+            MarkStackSegment* result = m_nextFreeSegment;
+            m_nextFreeSegment = result->m_previous;
+            return result;
+        }
+    }
+
+    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize));
+}
+
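+// Push the segment back onto the free list for reuse. Its memory stays
+// committed until shrinkReserve() is called.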
+void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
+{
+    MutexLocker locker(m_lock);
+    segment->m_previous = m_nextFreeSegment;
+    m_nextFreeSegment = segment;
+}
+
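+// Return every free-listed segment to the OS. The list is detached under the
+// lock so the potentially slow decommit work happens outside it.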
+void MarkStackSegmentAllocator::shrinkReserve()
+{
+    MarkStackSegment* segments;
+    {
+        MutexLocker locker(m_lock);
+        segments = m_nextFreeSegment;
+        m_nextFreeSegment = 0;
+    }
+    while (segments) {
+        MarkStackSegment* toFree = segments;
+        segments = segments->m_previous;
+        OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize);
+    }
+}
+
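+// A MarkStackArray is a segmented stack: m_topSegment is the segment being
+// pushed into and popped from, and older, full segments hang off it through
+// the m_previous chain.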
+MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
+    : m_allocator(allocator)
+    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize))
+    , m_top(0)
+    , m_numberOfPreviousSegments(0)
+{
+    m_topSegment = m_allocator.allocate();
+#if !ASSERT_DISABLED
+    m_topSegment->m_top = 0;
+#endif
+    m_topSegment->m_previous = 0;
+}
+
+MarkStackArray::~MarkStackArray()
+{
+    ASSERT(!m_topSegment->m_previous);
+    m_allocator.release(m_topSegment);
+}
+
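+// Called when the top segment is full: chain it behind a freshly allocated
+// segment and make the new, empty one current.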
+void MarkStackArray::expand()
+{
+    ASSERT(m_topSegment->m_top == m_segmentCapacity);
+
+    m_numberOfPreviousSegments++;
+
+    MarkStackSegment* nextSegment = m_allocator.allocate();
+#if !ASSERT_DISABLED
+    nextSegment->m_top = 0;
+#endif
+    nextSegment->m_previous = m_topSegment;
+    m_topSegment = nextSegment;
+    setTopForEmptySegment();
+    validatePrevious();
+}
+
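+// Called when the top segment is exhausted: release it and fall back to the
+// previous, full segment. Returns false only when the whole stack is empty.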
+bool MarkStackArray::refill()
+{
+    validatePrevious();
+    if (top())
+        return true;
+    MarkStackSegment* toFree = m_topSegment;
+    MarkStackSegment* previous = m_topSegment->m_previous;
+    if (!previous)
+        return false;
+    ASSERT(m_numberOfPreviousSegments);
+    m_numberOfPreviousSegments--;
+    m_topSegment = previous;
+    m_allocator.release(toFree);
+    setTopForFullSegment();
+    validatePrevious();
+    return true;
+}
+
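+// Donate work to another mark stack (in practice, the shared one) so that
+// idle markers have something to draw from. Whole segments are handed over
+// first, then loose cells off the top.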
+bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
+{
+    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
+    validatePrevious();
+    other.validatePrevious();
+
+    // Fast check: if the other mark stack already has enough segments, don't donate.
+    if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
+        return false;
+
+    size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
+    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);
+
+    // Looks like we should donate! Give the other mark stack all of our
+    // previous segments, and then top it off.
+    MarkStackSegment* previous = m_topSegment->m_previous;
+    while (previous) {
+        ASSERT(m_numberOfPreviousSegments);
+
+        MarkStackSegment* current = previous;
+        previous = current->m_previous;
+
+        current->m_previous = other.m_topSegment->m_previous;
+        other.m_topSegment->m_previous = current;
+
+        m_numberOfPreviousSegments--;
+        other.m_numberOfPreviousSegments++;
+    }
+    ASSERT(!m_numberOfPreviousSegments);
+    m_topSegment->m_previous = 0;
+    validatePrevious();
+    other.validatePrevious();
+
+    // Now top off. We want to keep at least numberOfCellsToKeep cells, but if
+    // we really have a lot of work, we give away half.
+    if (m_top > numberOfCellsToKeep * 2)
+        numberOfCellsToKeep = m_top / 2;
+    while (m_top > numberOfCellsToKeep)
+        other.append(removeLast());
+
+    return true;
+}
+
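+// Take work from another mark stack (in practice, the shared one): grab a
+// whole full segment if one is available, otherwise peel off a batch of
+// cells.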
+void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
+{
+    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
+    validatePrevious();
+    other.validatePrevious();
+
+    // If other has an entire segment, steal it and return.
+    if (other.m_topSegment->m_previous) {
+        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);
+
+        // First remove a segment from other.
+        MarkStackSegment* current = other.m_topSegment->m_previous;
+        other.m_topSegment->m_previous = current->m_previous;
+        other.m_numberOfPreviousSegments--;
+
+        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);
+
+        // Now add it to this.
+        current->m_previous = m_topSegment->m_previous;
+        m_topSegment->m_previous = current;
+        m_numberOfPreviousSegments++;
+
+        validatePrevious();
+        other.validatePrevious();
+        return;
+    }
+
+    // Otherwise steal 1/Nth of the other stack's cells, where N is the number
+    // of marker threads, or Options::minimumNumberOfCellsToKeep, whichever is
+    // bigger.
+    size_t numberOfCellsToSteal = std::max(static_cast<size_t>(Options::minimumNumberOfCellsToKeep), other.size() / Options::numberOfGCMarkers);
+    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
+        append(other.removeLast());
+}
+
+#if ENABLE(PARALLEL_GC)
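+// Body of each helper marking thread: drain cells from the shared mark stack
+// in slave mode until told to exit.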
+void MarkStackThreadSharedData::markingThreadMain()
+{
+    WTF::registerGCThread();
+    SlotVisitor slotVisitor(*this);
+    ParallelModeEnabler enabler(slotVisitor);
+    slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
+}
+
+void MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
+{
+    static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
+}
+#endif
+
+MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
+    : m_globalData(globalData)
+    , m_copiedSpace(&globalData->heap.m_storageSpace)
+    , m_sharedMarkStack(m_segmentAllocator)
+    , m_numberOfActiveParallelMarkers(0)
+    , m_parallelMarkersShouldExit(false)
+{
+#if ENABLE(PARALLEL_GC)
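+    // Start at 1: the main thread acts as the first marker, so we spawn
+    // numberOfGCMarkers - 1 helper threads.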
+    for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
+        m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
+        ASSERT(m_markingThreads.last());
+    }
+#endif
+}
+
+MarkStackThreadSharedData::~MarkStackThreadSharedData()
+{
+#if ENABLE(PARALLEL_GC)
+    // Destroy our marking threads.
+    {
+        MutexLocker locker(m_markingLock);
+        m_parallelMarkersShouldExit = true;
+        m_markingCondition.broadcast();
+    }
+    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
+        waitForThreadCompletion(m_markingThreads[i]);
+#endif
+}
+
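+// Called once marking is quiescent: return the cached segment reserve to the
+// OS and clear per-collection state.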
+void MarkStackThreadSharedData::reset()
+{
+    ASSERT(!m_numberOfActiveParallelMarkers);
+    ASSERT(!m_parallelMarkersShouldExit);
+    ASSERT(m_sharedMarkStack.isEmpty());
+
+#if ENABLE(PARALLEL_GC)
+    m_segmentAllocator.shrinkReserve();
+    m_opaqueRoots.clear();
+#else
+    ASSERT(m_opaqueRoots.isEmpty());
+#endif
+
+    m_weakReferenceHarvesters.removeAll();
+}