#include "config.h"
#include "SlotVisitor.h"
#include "SlotVisitorInlines.h"

#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "GCThread.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "VM.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include <wtf/StackStats.h>

namespace JSC {

SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
    : m_stack(shared.m_vm->heap.blockAllocator())
    , m_isInParallelMode(false)
    , m_shared(shared)
    , m_shouldHashCons(false)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStack();
}

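// Called at the start of a collection cycle. The hash-consing decision is made
// once here and copied to every GC thread's visitor, so all markers agree on
// whether to deduplicate strings for the duration of the cycle.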
void SlotVisitor::didStartMarking()
{
    if (heap()->operationInProgress() == FullCollection) {
#if ENABLE(PARALLEL_GC)
        ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
        m_opaqueRoots.clear();
#endif
    }

    m_shared.m_shouldHashCons = m_shared.m_vm->haveEnoughNewStringsToHashCons();
    m_shouldHashCons = m_shared.m_shouldHashCons;
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 0; i < m_shared.m_gcThreads.size(); ++i)
        m_shared.m_gcThreads[i]->slotVisitor()->m_shouldHashCons = m_shared.m_shouldHashCons;
#endif
}

void SlotVisitor::reset()
{
    ASSERT(m_stack.isEmpty());
    if (m_shouldHashCons) {
        m_uniqueStrings.clear();
        m_shouldHashCons = false;
    }
}

void SlotVisitor::clearMarkStack()
{
    m_stack.clear();
}

void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
    StackStats::probe();
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(0, roots[i]);
}

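// Dispatch helper for marking: the most common cell types (strings, final
// objects, arrays) get devirtualized fast paths; everything else goes through
// the cell's method table.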
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
    StackStats::probe();

    ASSERT(Heap::isMarked(cell));

    if (isJSString(cell)) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSFinalObject(cell)) {
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSArray(cell)) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

void SlotVisitor::donateKnownParallel()
{
    StackStats::probe();
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (m_stack.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (m_shared.m_sharedMarkStack.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<std::mutex> lock(m_shared.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);

    if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
        m_shared.m_markingConditionVariable.notify_all();
}

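// Drains this visitor's local mark stack. In parallel mode, the loop pauses
// after every minimumNumberOfScansBetweenRebalance() visits to offer surplus
// cells to idle markers via donateKnownParallel().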
void SlotVisitor::drain()
{
    StackStats::probe();
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers() > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}

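// Termination protocol for parallel marking: a marker is "active" while it
// holds local work. When no marker is active and the shared stack is empty,
// marking has reached a fixpoint; the master detects this and wakes any
// sleeping slaves so they can return. Otherwise, idle markers sleep on the
// condition variable until shared work appears (or exit is requested).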
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    StackStats::probe();
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers() > 1;
#else
    ASSERT(Options::numberOfGCMarkers() == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        std::lock_guard<std::mutex> lock(m_shared.m_markingMutex);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            std::unique_lock<std::mutex> lock(m_shared.m_markingMutex);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                        // Let any sleeping slaves know it's time for them to return.
                        m_shared.m_markingConditionVariable.notify_all();
                        return;
                    }

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingConditionVariable.wait(lock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingConditionVariable.notify_all();

                m_shared.m_markingConditionVariable.wait(lock, [this] { return !m_shared.m_sharedMarkStack.isEmpty() || m_shared.m_parallelMarkersShouldExit; });

                // Is the current phase done? If so, return from this function.
                if (m_shared.m_parallelMarkersShouldExit)
                    return;
            }

            size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

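// Opaque roots accumulate in a per-visitor set so marking itself never takes a
// lock; they are merged into the shared set, under a mutex, only when needed.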
void SlotVisitor::mergeOpaqueRoots()
{
    StackStats::probe();
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        std::lock_guard<std::mutex> lock(m_shared.m_opaqueRootsMutex);
        for (auto* root : m_opaqueRoots)
            m_shared.m_opaqueRoots.add(root);
    }
    m_opaqueRoots.clear();
}

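// A one-bit try-lock stored in the string's flags word. Under parallel GC the
// bit is set with a compare-and-swap, so concurrent markers never hash-cons the
// same string twice; a failed CAS simply means someone else got there first.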
ALWAYS_INLINE bool JSString::tryHashConsLock()
{
#if ENABLE(PARALLEL_GC)
    unsigned currentFlags = m_flags;

    if (currentFlags & HashConsLock)
        return false;

    unsigned newFlags = currentFlags | HashConsLock;

    if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
        return false;

    WTF::memoryBarrierAfterLock();
    return true;
#else
    if (isHashConsSingleton())
        return false;

    m_flags |= HashConsLock;

    return true;
#endif
}

ALWAYS_INLINE void JSString::releaseHashConsLock()
{
#if ENABLE(PARALLEL_GC)
    WTF::memoryBarrierBeforeUnlock();
#endif
    m_flags &= ~HashConsLock;
}

ALWAYS_INLINE bool JSString::shouldTryHashCons()
{
    return ((length() > 1) && !isRope() && !isHashConsSingleton());
}

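// Hash-consing: during marking, each visited string is looked up in a
// per-visitor map keyed by its contents. The first string with given contents
// becomes the singleton; later duplicates have their slots redirected to it,
// collapsing equal strings in object and array backing stores to a single cell.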
ALWAYS_INLINE void SlotVisitor::internalAppend(void* from, JSValue* slot)
{
    // This internalAppend is only intended for visits to object and array backing stores,
    // as it can change the JSValue pointed to by the argument when the original JSValue
    // is a string that contains the same contents as another string.

    StackStats::probe();

    ASSERT(slot);
    JSValue value = *slot;
    ASSERT(value);
    if (!value.isCell())
        return;

    JSCell* cell = value.asCell();
    if (!cell)
        return;

    validate(cell);

    if (m_shouldHashCons && cell->isString()) {
        JSString* string = jsCast<JSString*>(cell);
        if (string->shouldTryHashCons() && string->tryHashConsLock()) {
            UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
            if (addResult.isNewEntry)
                string->setHashConsSingleton();
            else {
                // Another string with the same contents already won; redirect the slot to it.
                JSValue existingJSValue = addResult.iterator->value;
                if (value != existingJSValue)
                    jsCast<JSString*>(existingJSValue.asCell())->clearHashConsSingleton();
                *slot = existingJSValue;
                string->releaseHashConsLock();
                return;
            }
            string->releaseHashConsLock();
        }
    }

    internalAppend(from, cell);
}

void SlotVisitor::harvestWeakReferences()
{
    StackStats::probe();
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

void SlotVisitor::finalizeUnconditionalFinalizers()
{
    StackStats::probe();
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

#if ENABLE(GC_VALIDATION)
void SlotVisitor::validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
            parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
        if (cell->structure()->JSCell::classInfo())
            ourClassName = cell->structure()->JSCell::classInfo()->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain.
    const ClassInfo* info = cell->classInfo();
    do { } while ((info = info->parentClass));
}
#else
void SlotVisitor::validate(JSCell*)
{
}
#endif

void SlotVisitor::dump(PrintStream&) const
{
    for (const JSCell* cell : markStack())
        dataLog(*cell, "\n");
}

} // namespace JSC