#include "config.h"
#include "SlotVisitor.h"
#include "SlotVisitorInlines.h"

#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "GCThread.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "VM.h"
#include "JSObject.h"
#include "JSString.h"
#include "Operations.h"
#include <wtf/StackStats.h>

namespace JSC {
SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
    : m_stack(shared.m_vm->heap.blockAllocator())
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_shared(shared)
    , m_shouldHashCons(false)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}
SlotVisitor::~SlotVisitor()
{
    ASSERT(m_stack.isEmpty());
}
void SlotVisitor::setup()
{
    m_shared.m_shouldHashCons = m_shared.m_vm->haveEnoughNewStringsToHashCons();
    m_shouldHashCons = m_shared.m_shouldHashCons;
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 0; i < m_shared.m_gcThreads.size(); ++i)
        m_shared.m_gcThreads[i]->slotVisitor()->m_shouldHashCons = m_shared.m_shouldHashCons;
#endif
}
void SlotVisitor::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
    if (m_shouldHashCons) {
        m_uniqueStrings.clear();
        m_shouldHashCons = false;
    }
}
void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
    StackStats::probe();
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(roots[i]);
}
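// Illustrative only: roughly how a caller feeds conservative roots into this
// visitor during a collection. The constructor arguments and the gather call
// below are a simplified sketch of what Heap does, not this file's API:
//
//     ConservativeRoots conservativeRoots(&heap.blocks(), &heap.storageSpace());
//     machineThreads.gatherConservativeRoots(conservativeRoots, ...);
//     visitor.append(conservativeRoots);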
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
    StackStats::probe();
#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_visitedTypeCounts.count(cell);
#endif

    ASSERT(Heap::isMarked(cell));

    if (isJSString(cell)) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSFinalObject(cell)) {
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSArray(cell)) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}
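// The type checks above are a hand-rolled fast path: strings, final objects,
// and arrays dominate most heaps, so dispatching to their visitChildren
// directly avoids the method-table indirection that the last line pays.
// Sketch of the equivalent generic-only version this fast path replaces:
//
//     static void visitChildrenGeneric(SlotVisitor& visitor, const JSCell* cell)
//     {
//         cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
//     }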
void SlotVisitor::donateKnownParallel()
{
    StackStats::probe();
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (m_stack.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (m_shared.m_sharedMarkStack.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    MutexTryLocker locker(m_shared.m_markingLock);
    if (!locker.locked())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);

    if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
        m_shared.m_markingCondition.broadcast();
}
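// Donation is the "push" half of load balancing; stealSomeCellsFrom() in
// drainFromShared() below is the "pull" half. A hypothetical round with two
// markers (the trace is illustrative, not a real API):
//
//     marker A: local stack has many cells, shared stack empty, lock uncontended
//               -> donateSomeCellsTo() moves a chunk to the shared stack
//               -> broadcast() wakes any idle marker
//     marker B: wakes inside drainFromShared(), steals cells, drains them locally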
void SlotVisitor::drain()
{
    StackStats::probe();
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers() > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    StackStats::probe();
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers() > 1;
#else
    ASSERT(Options::numberOfGCMarkers() == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                        // Let any sleeping slaves know it's time for them to return;
                        m_shared.m_markingCondition.broadcast();
                        return;
                    }

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);

                // Is the current phase done? If so, return from this function.
                if (m_shared.m_parallelMarkersShouldExit)
                    return;
            }

            size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}
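// A minimal sketch of how the two drain modes pair up, assuming the Heap
// starts Options::numberOfGCMarkers() - 1 GCThreads (simplified; the real
// coordination lives in Heap and GCThreadSharedData, not here):
//
//     // GC (master) thread:
//     visitor.donateAndDrain();                            // seed shared work
//     visitor.drainFromShared(SlotVisitor::MasterDrain);   // returns at termination
//
//     // each GCThread (slave):
//     slotVisitor->drainFromShared(SlotVisitor::SlaveDrain); // steal, drain, wait, repeat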
void SlotVisitor::mergeOpaqueRoots()
{
    StackStats::probe();
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        MutexLocker locker(m_shared.m_opaqueRootsLock);
        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
        HashSet<void*>::iterator end = m_opaqueRoots.end();
        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
            m_shared.m_opaqueRoots.add(*iter);
    }
    m_opaqueRoots.clear();
}
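// Opaque roots let non-cell objects participate in liveness queries: each
// visitor accumulates them privately, without locking, and merges them into
// the shared set once here rather than taking m_opaqueRootsLock per addition.
// Sketch of a hypothetical consumer after marking (containsOpaqueRoot() is
// the real query; the owner class is illustrative):
//
//     bool MyWeakHandleOwner::isReachableFromOpaqueRoots(Handle<Unknown>, void* context, SlotVisitor& visitor)
//     {
//         // The handle stays alive only if its non-cell owner was added as
//         // an opaque root by some marker during this collection.
//         return visitor.containsOpaqueRoot(context);
//     }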
ALWAYS_INLINE bool JSString::tryHashConsLock()
{
#if ENABLE(PARALLEL_GC)
    unsigned currentFlags = m_flags;

    if (currentFlags & HashConsLock)
        return false;

    unsigned newFlags = currentFlags | HashConsLock;

    if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
        return false;

    WTF::memoryBarrierAfterLock();
    return true;
#else
    if (isHashConsSingleton())
        return false;

    m_flags |= HashConsLock;

    return true;
#endif
}
ALWAYS_INLINE void JSString::releaseHashConsLock()
{
#if ENABLE(PARALLEL_GC)
    WTF::memoryBarrierBeforeUnlock();
#endif
    m_flags &= ~HashConsLock;
}
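// Together these form a one-bit try-lock stored in JSString::m_flags: a
// single weakCompareAndSwap either acquires the HashConsLock bit or fails
// immediately, so parallel markers never block on a string; the loser simply
// skips deduplication for that visit. Expected pairing, as internalAppend()
// below uses it:
//
//     if (string->shouldTryHashCons() && string->tryHashConsLock()) {
//         ... inspect/deduplicate the string ...
//         string->releaseHashConsLock();
//     }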
ALWAYS_INLINE bool JSString::shouldTryHashCons()
{
    return ((length() > 1) && !isRope() && !isHashConsSingleton());
}
ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot)
{
    // This internalAppend is only intended for visits to object and array backing stores,
    // as it can change the JSValue pointed to by the argument when the original JSValue
    // is a string that contains the same contents as another string.

    StackStats::probe();

    ASSERT(slot);

    JSValue value = *slot;
    ASSERT(value);
    if (!value.isCell())
        return;

    JSCell* cell = value.asCell();
    if (!cell)
        return;

    validate(cell);

    if (m_shouldHashCons && cell->isString()) {
        JSString* string = jsCast<JSString*>(cell);
        if (string->shouldTryHashCons() && string->tryHashConsLock()) {
            UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
            if (addResult.isNewEntry)
                string->setHashConsSingleton();
            else {
                JSValue existingJSValue = addResult.iterator->value;
                if (value != existingJSValue)
                    jsCast<JSString*>(existingJSValue.asCell())->clearHashConsSingleton();
                *slot = existingJSValue;
                string->releaseHashConsLock();
                return;
            }
            string->releaseHashConsLock();
        }
    }

    internalAppend(cell);
}
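// Net effect of the hash-consing path above, sketched for two slots holding
// distinct JSString cells with equal contents (cells X and Y are
// hypothetical; m_uniqueStrings is keyed by the underlying StringImpl*):
//
//     visit slotA -> "hello" (cell X): new map entry, X becomes the singleton
//     visit slotB -> "hello" (cell Y): existing entry found, *slotB is
//                    rewritten to point at X; Y is never appended, so it can
//                    be collected once nothing else references it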
void SlotVisitor::harvestWeakReferences()
{
    StackStats::probe();
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}
void SlotVisitor::finalizeUnconditionalFinalizers()
{
    StackStats::probe();
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}
#if ENABLE(GC_VALIDATION)
void SlotVisitor::validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
            parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
        if (cell->structure()->JSCell::classInfo())
            ourClassName = cell->structure()->JSCell::classInfo()->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain
    const ClassInfo* info = cell->classInfo();
    do { } while ((info = info->parentClass));
}
#else
void SlotVisitor::validate(JSCell*)
{
}
#endif

} // namespace JSC