#include "config.h"
#include "MarkStack.h"
+#include "MarkStackInlines.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlineMethods.h"
#include "ConservativeRoots.h"
+#include "CopiedSpace.h"
+#include "CopiedSpaceInlines.h"
#include "Heap.h"
-#include "Options.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSObject.h"
-#include "ScopeChain.h"
+
+#include "SlotVisitorInlines.h"
#include "Structure.h"
#include "WriteBarrier.h"
+#include <wtf/Atomics.h>
+#include <wtf/DataLog.h>
#include <wtf/MainThread.h>
namespace JSC {
-MarkStackSegmentAllocator::MarkStackSegmentAllocator()
- : m_nextFreeSegment(0)
-{
-}
-
-MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
-{
- shrinkReserve();
-}
-
-MarkStackSegment* MarkStackSegmentAllocator::allocate()
-{
- {
- MutexLocker locker(m_lock);
- if (m_nextFreeSegment) {
- MarkStackSegment* result = m_nextFreeSegment;
- m_nextFreeSegment = result->m_previous;
- return result;
- }
- }
-
- return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize));
-}
-
-void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
-{
- MutexLocker locker(m_lock);
- segment->m_previous = m_nextFreeSegment;
- m_nextFreeSegment = segment;
-}
-
-void MarkStackSegmentAllocator::shrinkReserve()
-{
- MarkStackSegment* segments;
- {
- MutexLocker locker(m_lock);
- segments = m_nextFreeSegment;
- m_nextFreeSegment = 0;
- }
- while (segments) {
- MarkStackSegment* toFree = segments;
- segments = segments->m_previous;
- OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize);
- }
-}
+COMPILE_ASSERT(MarkStackSegment::blockSize == WeakBlock::blockSize, blockSizeMatch);
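
// Editor's note: the COMPILE_ASSERT above pins the invariant this patch relies
// on: a MarkStackSegment is created in place inside a raw block obtained from
// the same BlockAllocator that (judging by the assert) also backs WeakBlocks,
// so recycled blocks fit either client. If the two blockSize constants ever
// diverged, the allocate<MarkStackSegment>() / deallocate() pairs below would
// traffic in regions of the wrong size.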
-MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
- : m_allocator(allocator)
- , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize))
+MarkStackArray::MarkStackArray(BlockAllocator& blockAllocator)
+ : m_blockAllocator(blockAllocator)
, m_top(0)
- , m_numberOfPreviousSegments(0)
+ , m_numberOfSegments(0)
{
- m_topSegment = m_allocator.allocate();
-#if !ASSERT_DISABLED
- m_topSegment->m_top = 0;
-#endif
- m_topSegment->m_previous = 0;
+ m_segments.push(MarkStackSegment::create(m_blockAllocator.allocate<MarkStackSegment>()));
+ m_numberOfSegments++;
}
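
// Editor's note: m_segments appears to be a WTF::DoublyLinkedList used as a
// stack of segments (push/removeHead/head all operate on the list head). The
// constructor seeds it with one segment so the head that m_top indexes into
// always exists; m_numberOfSegments mirrors the list length for cheap checks.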
MarkStackArray::~MarkStackArray()
{
- ASSERT(!m_topSegment->m_previous);
- m_allocator.release(m_topSegment);
+ ASSERT(m_numberOfSegments == 1 && m_segments.size() == 1);
+ m_blockAllocator.deallocate(MarkStackSegment::destroy(m_segments.removeHead()));
}
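
// Editor's note: MarkStackSegment::destroy() presumably unwinds create() and
// returns the underlying block for deallocate(). The assert documents the
// precondition that refill() has already drained and freed every segment
// except the final head.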
void MarkStackArray::expand()
{
- ASSERT(m_topSegment->m_top == m_segmentCapacity);
+ ASSERT(m_segments.head()->m_top == s_segmentCapacity);
- m_numberOfPreviousSegments++;
+ MarkStackSegment* nextSegment = MarkStackSegment::create(m_blockAllocator.allocate<MarkStackSegment>());
+ m_numberOfSegments++;
- MarkStackSegment* nextSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
nextSegment->m_top = 0;
#endif
- nextSegment->m_previous = m_topSegment;
- m_topSegment = nextSegment;
+
+ m_segments.push(nextSegment);
setTopForEmptySegment();
validatePrevious();
}

bool MarkStackArray::refill()
{
validatePrevious();
if (top())
return true;
- MarkStackSegment* toFree = m_topSegment;
- MarkStackSegment* previous = m_topSegment->m_previous;
- if (!previous)
- return false;
- ASSERT(m_numberOfPreviousSegments);
- m_numberOfPreviousSegments--;
- m_topSegment = previous;
- m_allocator.release(toFree);
+ m_blockAllocator.deallocate(MarkStackSegment::destroy(m_segments.removeHead()));
+ ASSERT(m_numberOfSegments > 1);
+ m_numberOfSegments--;
setTopForFullSegment();
validatePrevious();
return true;
}
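
// Editor's note: this refill() never returns false. Its callers check
// isEmpty() before calling it (see the drain loop removed further below:
// "while (!m_stack.isEmpty()) { m_stack.refill(); ... }"), so whenever top()
// is zero here a full segment necessarily sits beneath the exhausted head --
// hence the ASSERT(m_numberOfSegments > 1).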
-bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
+void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
- ASSERT(m_segmentCapacity == other.m_segmentCapacity);
- validatePrevious();
- other.validatePrevious();
-
- // Fast check: see if the other mark stack already has enough segments.
- if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
- return false;
-
- size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
- ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);
-
- // Looks like we should donate! Give the other mark stack all of our
- // previous segments, and then top it off.
- MarkStackSegment* previous = m_topSegment->m_previous;
- while (previous) {
- ASSERT(m_numberOfPreviousSegments);
+ // Try to donate about 1 / 2 of our cells. To reduce copying costs,
+ // we prefer donating whole segments over donating individual cells,
+ // even if this skews away from our 1 / 2 target.
- MarkStackSegment* current = previous;
- previous = current->m_previous;
-
- current->m_previous = other.m_topSegment->m_previous;
- other.m_topSegment->m_previous = current;
-
- m_numberOfPreviousSegments--;
- other.m_numberOfPreviousSegments++;
- }
- ASSERT(!m_numberOfPreviousSegments);
- m_topSegment->m_previous = 0;
- validatePrevious();
- other.validatePrevious();
-
- // Now top off. We want to keep at a minimum numberOfCellsToKeep, but if
- // we really have a lot of work, we give up half.
- if (m_top > numberOfCellsToKeep * 2)
- numberOfCellsToKeep = m_top / 2;
- while (m_top > numberOfCellsToKeep)
- other.append(removeLast());
-
- return true;
-}
+ size_t segmentsToDonate = m_numberOfSegments / 2; // If we only have one segment (our head), we don't donate any segments.
-void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
-{
- ASSERT(m_segmentCapacity == other.m_segmentCapacity);
- validatePrevious();
- other.validatePrevious();
-
- // If other has an entire segment, steal it and return.
- if (other.m_topSegment->m_previous) {
- ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);
-
- // First remove a segment from other.
- MarkStackSegment* current = other.m_topSegment->m_previous;
- other.m_topSegment->m_previous = current->m_previous;
- other.m_numberOfPreviousSegments--;
-
- ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);
-
- // Now add it to this.
- current->m_previous = m_topSegment->m_previous;
- m_topSegment->m_previous = current;
- m_numberOfPreviousSegments++;
-
- validatePrevious();
- other.validatePrevious();
+ if (!segmentsToDonate) {
+ size_t cellsToDonate = m_top / 2; // Round down: with a single cell left, donate none (0 of 1).
+ while (cellsToDonate--) {
+ ASSERT(m_top);
+ other.append(removeLast());
+ }
return;
}
-
- // Otherwise drain 1/Nth of the shared array where N is the number of
- // workers, or Options::minimumNumberOfCellsToKeep, whichever is bigger.
- size_t numberOfCellsToSteal = std::max((size_t)Options::minimumNumberOfCellsToKeep, other.size() / Options::numberOfGCMarkers);
- while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
- append(other.removeLast());
-}
-
-#if ENABLE(PARALLEL_GC)
-void MarkStackThreadSharedData::markingThreadMain()
-{
- WTF::registerGCThread();
- SlotVisitor slotVisitor(*this);
- ParallelModeEnabler enabler(slotVisitor);
- slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
-}
-
-void MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
-{
- static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
-}
-#endif
-
-MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
- : m_globalData(globalData)
- , m_copiedSpace(&globalData->heap.m_storageSpace)
- , m_sharedMarkStack(m_segmentAllocator)
- , m_numberOfActiveParallelMarkers(0)
- , m_parallelMarkersShouldExit(false)
-{
-#if ENABLE(PARALLEL_GC)
- for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
- m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
- ASSERT(m_markingThreads.last());
- }
-#endif
-}
-
-MarkStackThreadSharedData::~MarkStackThreadSharedData()
-{
-#if ENABLE(PARALLEL_GC)
- // Destroy our marking threads.
- {
- MutexLocker locker(m_markingLock);
- m_parallelMarkersShouldExit = true;
- m_markingCondition.broadcast();
- }
- for (unsigned i = 0; i < m_markingThreads.size(); ++i)
- waitForThreadCompletion(m_markingThreads[i]);
-#endif
-}
-
-void MarkStackThreadSharedData::reset()
-{
- ASSERT(!m_numberOfActiveParallelMarkers);
- ASSERT(!m_parallelMarkersShouldExit);
- ASSERT(m_sharedMarkStack.isEmpty());
-
-#if ENABLE(PARALLEL_GC)
- m_segmentAllocator.shrinkReserve();
- m_opaqueRoots.clear();
-#else
- ASSERT(m_opaqueRoots.isEmpty());
-#endif
-
- m_weakReferenceHarvesters.removeAll();
-}
-
-void MarkStack::reset()
-{
- m_visitCount = 0;
- ASSERT(m_stack.isEmpty());
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
-#else
- m_opaqueRoots.clear();
-#endif
-}
-void MarkStack::append(ConservativeRoots& conservativeRoots)
-{
- JSCell** roots = conservativeRoots.roots();
- size_t size = conservativeRoots.size();
- for (size_t i = 0; i < size; ++i)
- internalAppend(roots[i]);
-}
-
-ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
-{
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- m_visitedTypeCounts.count(cell);
-#endif
+ validatePrevious();
+ other.validatePrevious();
- ASSERT(Heap::isMarked(cell));
-
- if (isJSString(cell)) {
- JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
- return;
- }
+ // Remove our head and the head of the other list before we start moving segments around.
+ // We'll add them back on once we're done donating.
+ MarkStackSegment* myHead = m_segments.removeHead();
+ MarkStackSegment* otherHead = other.m_segments.removeHead();
- if (isJSFinalObject(cell)) {
- JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
- return;
+ while (segmentsToDonate--) {
+ MarkStackSegment* current = m_segments.removeHead();
+ ASSERT(current);
+ ASSERT(m_numberOfSegments > 1);
+ other.m_segments.push(current);
+ m_numberOfSegments--;
+ other.m_numberOfSegments++;
}
- if (isJSArray(cell)) {
- JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
- return;
- }
+ // Put the original heads back in their places.
+ m_segments.push(myHead);
+ other.m_segments.push(otherHead);
- cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
+ validatePrevious();
+ other.validatePrevious();
}
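
// Editor's sketch (not part of the patch): the two donation regimes above,
// reduced to their arithmetic, as a standalone compilable illustration.
// donationPlan and its parameters are hypothetical names.
#include <cstddef>
#include <cstdio>

static void donationPlan(std::size_t numberOfSegments, std::size_t cellsInHead)
{
    std::size_t segmentsToDonate = numberOfSegments / 2; // the head segment is never donated
    if (!segmentsToDonate) {
        // Single-segment case: move individual cells instead, rounding down.
        std::printf("donate %zu of %zu cells\n", cellsInHead / 2, cellsInHead);
        return;
    }
    std::printf("donate %zu of %zu segments\n", segmentsToDonate, numberOfSegments);
}

int main()
{
    donationPlan(1, 7); // prints "donate 3 of 7 cells"
    donationPlan(5, 7); // prints "donate 2 of 5 segments"
    return 0;
}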
-void SlotVisitor::donateSlow()
+void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount)
{
- // Refuse to donate if shared has more entries than I do.
- if (m_shared.m_sharedMarkStack.size() > m_stack.size())
- return;
- MutexLocker locker(m_shared.m_markingLock);
- if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
- // Only wake up threads if the shared stack is big enough; otherwise assume that
- // it's more profitable for us to just scan this ourselves later.
- if (m_shared.m_sharedMarkStack.size() >= Options::sharedStackWakeupThreshold)
- m_shared.m_markingCondition.broadcast();
- }
-}
+ // Try to steal 1 / N of the shared array, where N is the number of idle threads.
+ // To reduce copying costs, we prefer stealing a whole segment over stealing
+ // individual cells, even if this skews away from our 1 / N target.
-void SlotVisitor::drain()
-{
- ASSERT(m_isInParallelMode);
-
-#if ENABLE(PARALLEL_GC)
- if (Options::numberOfGCMarkers > 1) {
- while (!m_stack.isEmpty()) {
- m_stack.refill();
- for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
- visitChildren(*this, m_stack.removeLast());
- donateKnownParallel();
- }
+ validatePrevious();
+ other.validatePrevious();
- mergeOpaqueRootsIfNecessary();
- return;
- }
-#endif
-
- while (!m_stack.isEmpty()) {
- m_stack.refill();
- while (m_stack.canRemoveLast())
- visitChildren(*this, m_stack.removeLast());
- }
-}
+ // If other has an entire segment, steal it and return.
+ if (other.m_numberOfSegments > 1) {
+ // Move the heads of the lists aside. We'll push them back on after.
+ MarkStackSegment* otherHead = other.m_segments.removeHead();
+ MarkStackSegment* myHead = m_segments.removeHead();
-void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
-{
- ASSERT(m_isInParallelMode);
-
- ASSERT(Options::numberOfGCMarkers);
-
- bool shouldBeParallel;
+ ASSERT(other.m_segments.head()->m_top == s_segmentCapacity);
-#if ENABLE(PARALLEL_GC)
- shouldBeParallel = Options::numberOfGCMarkers > 1;
-#else
- ASSERT(Options::numberOfGCMarkers == 1);
- shouldBeParallel = false;
-#endif
-
- if (!shouldBeParallel) {
- // This call should be a no-op.
- ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
- ASSERT(m_stack.isEmpty());
- ASSERT(m_shared.m_sharedMarkStack.isEmpty());
- return;
- }
-
-#if ENABLE(PARALLEL_GC)
- {
- MutexLocker locker(m_shared.m_markingLock);
- m_shared.m_numberOfActiveParallelMarkers++;
- }
- while (true) {
- {
- MutexLocker locker(m_shared.m_markingLock);
- m_shared.m_numberOfActiveParallelMarkers--;
+ m_segments.push(other.m_segments.removeHead());
- // How we wait differs depending on drain mode.
- if (sharedDrainMode == MasterDrain) {
- // Wait until either termination is reached, or until there is some work
- // for us to do.
- while (true) {
- // Did we reach termination?
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
- // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
- m_shared.m_markingCondition.broadcast();
- return;
- }
-
- // Is there work to be done?
- if (!m_shared.m_sharedMarkStack.isEmpty())
- break;
-
- // Otherwise wait.
- m_shared.m_markingCondition.wait(m_shared.m_markingLock);
- }
- } else {
- ASSERT(sharedDrainMode == SlaveDrain);
-
- // Did we detect termination? If so, let the master know.
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
- m_shared.m_markingCondition.broadcast();
-
- while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
- doneCopying();
- m_shared.m_markingCondition.wait(m_shared.m_markingLock);
- }
-
- // Is the VM exiting? If so, exit this thread.
- if (m_shared.m_parallelMarkersShouldExit) {
- doneCopying();
- return;
- }
- }
-
- m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
- m_shared.m_numberOfActiveParallelMarkers++;
- }
+ m_numberOfSegments++;
+ other.m_numberOfSegments--;
- drain();
- }
-#endif
-}
-
-void MarkStack::mergeOpaqueRoots()
-{
- ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
- {
- MutexLocker locker(m_shared.m_opaqueRootsLock);
- HashSet<void*>::iterator begin = m_opaqueRoots.begin();
- HashSet<void*>::iterator end = m_opaqueRoots.end();
- for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
- m_shared.m_opaqueRoots.add(*iter);
- }
- m_opaqueRoots.clear();
-}
-
-void SlotVisitor::startCopying()
-{
- ASSERT(!m_copyBlock);
- if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
- CRASH();
-}
-
-void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
-{
- if (CopiedSpace::isOversize(bytes)) {
- m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(ptr));
- return 0;
- }
-
- if (m_shared.m_copiedSpace->isPinned(ptr))
- return 0;
-
- // The only time it's possible to have a null copy block is if we have just started copying.
- if (!m_copyBlock)
- startCopying();
-
- if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) {
- // We don't need to lock across these two calls because the master thread won't
- // call doneCopying() because this thread is considered active.
- m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
- if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
- CRASH();
- }
- return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
-}
-
-void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
-{
- void* oldPtr = *ptr;
- void* newPtr = allocateNewSpace(oldPtr, bytes);
- if (newPtr) {
- size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));
-
- JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
- for (unsigned i = 0; i < length; i++) {
- JSValue& value = values[i];
- newValues[i] = value;
- if (!value)
- continue;
- internalAppend(value);
- }
-
- memcpy(newPtr, oldPtr, jsValuesOffset);
- *ptr = newPtr;
- } else
- append(values, length);
-}
+ m_segments.push(myHead);
+ other.m_segments.push(otherHead);
-void SlotVisitor::doneCopying()
-{
- if (!m_copyBlock)
+ validatePrevious();
+ other.validatePrevious();
return;
+ }
- m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
-
- m_copyBlock = 0;
-}
-
-void SlotVisitor::harvestWeakReferences()
-{
- for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
- current->visitWeakReferences(*this);
-}
-
-void SlotVisitor::finalizeUnconditionalFinalizers()
-{
- while (m_shared.m_unconditionalFinalizers.hasNext())
- m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
-}
-
-#if ENABLE(GC_VALIDATION)
-void MarkStack::validate(JSCell* cell)
-{
- if (!cell)
- CRASH();
-
- if (!cell->structure())
- CRASH();
-
- // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
- // I hate this sentence.
- if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo())
- CRASH();
-}
-#else
-void MarkStack::validate(JSCell*)
-{
+ size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount; // Round up: a single remaining cell is still stolen (1 of 1).
+ while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
+ append(other.removeLast());
}
-#endif
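
// Editor's sketch (not part of the patch): the steal share above is a ceiling
// division, so even a single leftover cell gets claimed. cellsToSteal is a
// hypothetical name; idleThreadCount is assumed nonzero, as the caller
// guarantees.
#include <cstddef>
#include <cstdio>

static std::size_t cellsToSteal(std::size_t sharedSize, std::size_t idleThreadCount)
{
    return (sharedSize + idleThreadCount - 1) / idleThreadCount; // ceil(sharedSize / idleThreadCount)
}

int main()
{
    std::printf("%zu\n", cellsToSteal(10, 4)); // 3 -- about 1 / 4 of the array
    std::printf("%zu\n", cellsToSteal(1, 8));  // 1 -- round up to steal 1 of 1
    return 0;
}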
} // namespace JSC