// JavaScriptCore heap/SlotVisitor.cpp

#include "config.h"
#include "SlotVisitor.h"
#include "SlotVisitorInlines.h"

#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "GCThread.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "VM.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include <wtf/StackStats.h>

namespace JSC {

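// A SlotVisitor traverses the object graph during the mark phase of garbage
// collection. Each visitor owns a local mark stack; in parallel mode it
// donates cells to, and steals cells from, the shared mark stack held in
// GCThreadSharedData. A typical driver looks roughly like this (illustrative
// sketch only, not the actual Heap code):
//
//     visitor.didStartMarking();
//     visitor.append(conservativeRoots); // push the roots
//     visitor.drain();                   // trace until the stack is empty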
SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
    : m_stack(shared.m_vm->heap.blockAllocator())
    , m_bytesVisited(0)
    , m_bytesCopied(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_shared(shared)
    , m_shouldHashCons(false)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStack();
}

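// Called at the start of a collection cycle. Opaque roots survive an eden
// collection; for a full collection they must already have been merged into
// the shared set (parallel GC) or are cleared here. This is also where the
// per-cycle decision to hash-cons duplicate strings is made and propagated
// to every GC thread's visitor.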
void SlotVisitor::didStartMarking()
{
    if (heap()->operationInProgress() == FullCollection) {
#if ENABLE(PARALLEL_GC)
        ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
        m_opaqueRoots.clear();
#endif
    }

    m_shared.m_shouldHashCons = m_shared.m_vm->haveEnoughNewStringsToHashCons();
    m_shouldHashCons = m_shared.m_shouldHashCons;
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 0; i < m_shared.m_gcThreads.size(); ++i)
        m_shared.m_gcThreads[i]->slotVisitor()->m_shouldHashCons = m_shared.m_shouldHashCons;
#endif
}

void SlotVisitor::reset()
{
    m_bytesVisited = 0;
    m_bytesCopied = 0;
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
    if (m_shouldHashCons) {
        m_uniqueStrings.clear();
        m_shouldHashCons = false;
    }
}

void SlotVisitor::clearMarkStack()
{
    m_stack.clear();
}

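// Mark every cell found by the conservative roots scan. These references have
// no owning cell, so the 'from' argument of internalAppend is null.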
void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
    StackStats::probe();
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(0, roots[i]);
}

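// Visit a cell's outgoing references. The type checks below devirtualize the
// call for the most common cell types (strings, final objects, and arrays)
// before falling back to the slower method-table dispatch.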
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
    StackStats::probe();

    ASSERT(Heap::isMarked(cell));

    if (isJSString(cell)) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSFinalObject(cell)) {
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSArray(cell)) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

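// Move some of the local mark stack onto the shared mark stack so idle GC
// threads can pick the work up. Each early return below errs on the side of
// not donating, since this function is called frequently while draining.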
void SlotVisitor::donateKnownParallel()
{
    StackStats::probe();
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (m_stack.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (m_shared.m_sharedMarkStack.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<std::mutex> lock(m_shared.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);

    if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
        m_shared.m_markingConditionVariable.notify_all();
}

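// Drain the local mark stack. With multiple markers, pause every
// minimumNumberOfScansBetweenRebalance() visits to offer work to other
// threads; with a single marker, just visit until the stack is empty.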
void SlotVisitor::drain()
{
    StackStats::probe();
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers() > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}

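// Repeatedly steal work from the shared mark stack and drain it until the
// markers reach global termination: no active markers and an empty shared
// stack. In MasterDrain mode the caller is the controlling GC thread and
// returns once termination is reached; in SlaveDrain mode the caller is a GC
// worker that sleeps between batches and returns only when told to exit.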
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    StackStats::probe();
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers() > 1;
#else
    ASSERT(Options::numberOfGCMarkers() == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        std::lock_guard<std::mutex> lock(m_shared.m_markingMutex);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            std::unique_lock<std::mutex> lock(m_shared.m_markingMutex);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                        // Let any sleeping slaves know it's time for them to return.
                        m_shared.m_markingConditionVariable.notify_all();
                        return;
                    }

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingConditionVariable.wait(lock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingConditionVariable.notify_all();

                m_shared.m_markingConditionVariable.wait(lock, [this] { return !m_shared.m_sharedMarkStack.isEmpty() || m_shared.m_parallelMarkersShouldExit; });

                // Is the current phase done? If so, return from this function.
                if (m_shared.m_parallelMarkersShouldExit)
                    return;
            }

            size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

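// Flush this visitor's thread-local opaque roots into the shared set under
// the opaque-roots lock. Opaque roots are non-cell pointers that weak-handle
// owners consult when deciding whether their referents are still reachable.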
void SlotVisitor::mergeOpaqueRoots()
{
    StackStats::probe();
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        std::lock_guard<std::mutex> lock(m_shared.m_opaqueRootsMutex);
        for (auto* root : m_opaqueRoots)
            m_shared.m_opaqueRoots.add(root);
    }
    m_opaqueRoots.clear();
}

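// Try to take the per-string hash-consing lock. Under parallel GC the lock is
// a flag bit acquired with a compare-and-swap; on contention the caller
// simply skips hash-consing this string during this visit.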
ALWAYS_INLINE bool JSString::tryHashConsLock()
{
#if ENABLE(PARALLEL_GC)
    unsigned currentFlags = m_flags;

    if (currentFlags & HashConsLock)
        return false;

    unsigned newFlags = currentFlags | HashConsLock;

    if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
        return false;

    WTF::memoryBarrierAfterLock();
    return true;
#else
    if (isHashConsSingleton())
        return false;

    m_flags |= HashConsLock;

    return true;
#endif
}

ALWAYS_INLINE void JSString::releaseHashConsLock()
{
#if ENABLE(PARALLEL_GC)
    WTF::memoryBarrierBeforeUnlock();
#endif
    m_flags &= ~HashConsLock;
}

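// Only flat (non-rope) strings longer than one character that are not already
// the canonical copy are worth attempting to hash-cons.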
ALWAYS_INLINE bool JSString::shouldTryHashCons()
{
    return ((length() > 1) && !isRope() && !isHashConsSingleton());
}

ALWAYS_INLINE void SlotVisitor::internalAppend(void* from, JSValue* slot)
{
    // This internalAppend is only intended for visits to object and array
    // backing stores, as it can change the JSValue pointed to by the slot
    // argument when the original JSValue is a string that has the same
    // contents as another string.

    StackStats::probe();
    ASSERT(slot);
    JSValue value = *slot;
    ASSERT(value);
    if (!value.isCell())
        return;

    JSCell* cell = value.asCell();
    if (!cell)
        return;

    validate(cell);

    if (m_shouldHashCons && cell->isString()) {
        JSString* string = jsCast<JSString*>(cell);
        if (string->shouldTryHashCons() && string->tryHashConsLock()) {
            UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
            if (addResult.isNewEntry)
                string->setHashConsSingleton();
            else {
                JSValue existingJSValue = addResult.iterator->value;
                if (value != existingJSValue)
                    jsCast<JSString*>(existingJSValue.asCell())->clearHashConsSingleton();
                *slot = existingJSValue;
                string->releaseHashConsLock();
                return;
            }
            string->releaseHashConsLock();
        }
    }

    internalAppend(from, cell);
}

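// Give every registered WeakReferenceHarvester a chance to visit its weak
// references and mark anything those references require to stay alive.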
void SlotVisitor::harvestWeakReferences()
{
    StackStats::probe();
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

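// Drain the queue of UnconditionalFinalizers, running each queued finalizer
// exactly once at the end of marking.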
void SlotVisitor::finalizeUnconditionalFinalizers()
{
    StackStats::probe();
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

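// With GC_VALIDATION enabled, sanity-check a cell before it is visited: it
// must have a structure, the structure chain must be self-consistent, and
// the ClassInfo chain must be walkable. Otherwise this is a no-op.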
#if ENABLE(GC_VALIDATION)
void SlotVisitor::validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
            parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
        if (cell->structure()->JSCell::classInfo())
            ourClassName = cell->structure()->JSCell::classInfo()->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain
    const ClassInfo* info = cell->classInfo();
    do { } while ((info = info->parentClass));
}
#else
void SlotVisitor::validate(JSCell*)
{
}
#endif

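// Debugging aid: print every cell currently on the local mark stack.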
void SlotVisitor::dump(PrintStream&) const
{
    for (const JSCell* cell : markStack())
        dataLog(*cell, "\n");
}

} // namespace JSC