// Provenance: Apple JavaScriptCore (JavaScriptCore-1218.33), heap/SlotVisitor.cpp
// (mirrored via git.saurik.com, apple/javascriptcore.git).
1 #include "config.h"
2 #include "SlotVisitor.h"
3 #include "SlotVisitorInlines.h"
4
5 #include "ConservativeRoots.h"
6 #include "CopiedSpace.h"
7 #include "CopiedSpaceInlines.h"
8 #include "GCThread.h"
9 #include "JSArray.h"
10 #include "JSDestructibleObject.h"
11 #include "VM.h"
12 #include "JSObject.h"
13 #include "JSString.h"
14 #include "Operations.h"
15 #include <wtf/StackStats.h>
16
17 namespace JSC {
18
// Constructs a visitor bound to the GC's shared marking state. The mark
// stack draws its segments from the heap's block allocator, so stack growth
// reuses GC-owned blocks instead of the system allocator.
SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
    : m_stack(shared.m_vm->heap.blockAllocator())
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_shared(shared)
    , m_shouldHashCons(false) // Re-derived per cycle by setup().
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}
31
// The mark stack must be fully drained before the visitor is destroyed;
// leftover entries would mean cells were queued but never visited.
SlotVisitor::~SlotVisitor()
{
    ASSERT(m_stack.isEmpty());
}
36
// Decides, once per GC cycle, whether this cycle should hash-cons strings
// (deduplicate equal string contents while marking). The decision is stored
// on the shared state and mirrored into every visitor so all markers agree.
void SlotVisitor::setup()
{
    m_shared.m_shouldHashCons = m_shared.m_vm->haveEnoughNewStringsToHashCons();
    m_shouldHashCons = m_shared.m_shouldHashCons;
#if ENABLE(PARALLEL_GC)
    // Propagate the flag to each parallel marking thread's own visitor.
    for (unsigned i = 0; i < m_shared.m_gcThreads.size(); ++i)
        m_shared.m_gcThreads[i]->slotVisitor()->m_shouldHashCons = m_shared.m_shouldHashCons;
#endif
}
46
// Returns the visitor to its pre-marking state after a cycle completes.
void SlotVisitor::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    // In parallel mode local opaque roots are pushed to the shared set
    // during draining, so none should remain here.
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
    // Drop the dedup table; the flag is re-derived by setup() next cycle.
    if (m_shouldHashCons) {
        m_uniqueStrings.clear();
        m_shouldHashCons = false;
    }
}
61
62 void SlotVisitor::append(ConservativeRoots& conservativeRoots)
63 {
64 StackStats::probe();
65 JSCell** roots = conservativeRoots.roots();
66 size_t size = conservativeRoots.size();
67 for (size_t i = 0; i < size; ++i)
68 internalAppend(roots[i]);
69 }
70
71 ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
72 {
73 StackStats::probe();
74 #if ENABLE(SIMPLE_HEAP_PROFILING)
75 m_visitedTypeCounts.count(cell);
76 #endif
77
78 ASSERT(Heap::isMarked(cell));
79
80 if (isJSString(cell)) {
81 JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
82 return;
83 }
84
85 if (isJSFinalObject(cell)) {
86 JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
87 return;
88 }
89
90 if (isJSArray(cell)) {
91 JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
92 return;
93 }
94
95 cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
96 }
97
// Donates part of this visitor's private mark stack to the shared stack so
// idle parallel markers can pick up work. Deliberately cheap: it bails out
// without locking in the common cases and only donates when likely useful.
void SlotVisitor::donateKnownParallel()
{
    StackStats::probe();
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (m_stack.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (m_shared.m_sharedMarkStack.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    MutexTryLocker locker(m_shared.m_markingLock);
    if (!locker.locked())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);

    // Wake sleeping markers only if some markers are actually idle.
    if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
        m_shared.m_markingCondition.broadcast();
}
125
// Visits cells from this visitor's mark stack until it is empty. With more
// than one GC marker, work is periodically rebalanced: after a bounded batch
// of visits we offer surplus cells to the shared stack, and local opaque
// roots are merged once the stack drains.
void SlotVisitor::drain()
{
    StackStats::probe();
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers() > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            // Visit at most minimumNumberOfScansBetweenRebalance() cells,
            // then try to donate before the next batch.
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    // Single-marker path: no rebalancing; just drain to empty.
    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}
151
// Parallel-marking work loop: repeatedly steals cells from the shared mark
// stack and drains them, coordinating with the other markers through
// m_markingLock / m_markingCondition. Termination is detected when no marker
// is active and the shared stack is empty. The master (MasterDrain) returns
// on termination; slaves (SlaveDrain) return when told to exit.
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    StackStats::probe();
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers() > 1;
#else
    ASSERT(Options::numberOfGCMarkers() == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            // We hold the lock, so this marker counts as idle until it
            // steals more work below.
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                        // Let any sleeping slaves know it's time for them to return;
                        m_shared.m_markingCondition.broadcast();
                        return;
                    }

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                // Sleep until there is work or the phase ends.
                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);

                // Is the current phase done? If so, return from this function.
                if (m_shared.m_parallelMarkersShouldExit)
                    return;
            }

            // Steal an amount of work proportional to how many markers are
            // idle, then mark ourselves active again.
            size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}
229
230 void SlotVisitor::mergeOpaqueRoots()
231 {
232 StackStats::probe();
233 ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
234 {
235 MutexLocker locker(m_shared.m_opaqueRootsLock);
236 HashSet<void*>::iterator begin = m_opaqueRoots.begin();
237 HashSet<void*>::iterator end = m_opaqueRoots.end();
238 for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
239 m_shared.m_opaqueRoots.add(*iter);
240 }
241 m_opaqueRoots.clear();
242 }
243
// Tries to acquire the per-string hash-cons lock kept in the string's flag
// bits. Returns true on success; the caller must pair a successful acquire
// with releaseHashConsLock(). Failure means "skip this string", not "retry".
ALWAYS_INLINE bool JSString::tryHashConsLock()
{
#if ENABLE(PARALLEL_GC)
    unsigned currentFlags = m_flags;

    // Another marking thread already holds the lock.
    if (currentFlags & HashConsLock)
        return false;

    unsigned newFlags = currentFlags | HashConsLock;

    // A weak CAS may fail spuriously or on contention; either way we
    // simply give up on this string.
    if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
        return false;

    // Acquire barrier: reads of the string's contents must not be
    // reordered above the successful lock.
    WTF::memoryBarrierAfterLock();
    return true;
#else
    // Single-threaded marking: no contention possible. Skip strings that
    // are already the canonical singleton; otherwise just set the bit.
    if (isHashConsSingleton())
        return false;

    m_flags |= HashConsLock;

    return true;
#endif
}
268
// Releases the per-string hash-cons lock taken by tryHashConsLock().
ALWAYS_INLINE void JSString::releaseHashConsLock()
{
#if ENABLE(PARALLEL_GC)
    // Release barrier: writes made under the lock must be visible before
    // the lock bit is cleared.
    WTF::memoryBarrierBeforeUnlock();
#endif
    m_flags &= ~HashConsLock;
}
276
277 ALWAYS_INLINE bool JSString::shouldTryHashCons()
278 {
279 return ((length() > 1) && !isRope() && !isHashConsSingleton());
280 }
281
// Visits the JSValue stored at |slot|, and may rewrite the slot: when
// hash-consing is active and the value is a string whose contents already
// appear in m_uniqueStrings, the slot is redirected to the previously seen
// string so equal strings end up shared.
ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot)
{
    // This internalAppend is only intended for visits to object and array backing stores.
    // as it can change the JSValue pointed to be the argument when the original JSValue
    // is a string that contains the same contents as another string.

    StackStats::probe();
    ASSERT(slot);
    JSValue value = *slot;
    ASSERT(value);
    if (!value.isCell())
        return;

    JSCell* cell = value.asCell();
    if (!cell)
        return;

    validate(cell);

    if (m_shouldHashCons && cell->isString()) {
        JSString* string = jsCast<JSString*>(cell);
        // Only dedupe strings worth deduping, and only if the per-string
        // lock can be taken without waiting.
        if (string->shouldTryHashCons() && string->tryHashConsLock()) {
            UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
            if (addResult.isNewEntry)
                // First sighting of these contents: this string becomes
                // the canonical singleton.
                string->setHashConsSingleton();
            else {
                JSValue existingJSValue = addResult.iterator->value;
                // A distinct string with the same contents exists; the old
                // one loses singleton status and the slot is redirected
                // to it.
                if (value != existingJSValue)
                    jsCast<JSString*>(existingJSValue.asCell())->clearHashConsSingleton();
                *slot = existingJSValue;
                string->releaseHashConsLock();
                return;
            }
            string->releaseHashConsLock();
        }
    }

    internalAppend(cell);
}
321
322 void SlotVisitor::harvestWeakReferences()
323 {
324 StackStats::probe();
325 for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
326 current->visitWeakReferences(*this);
327 }
328
329 void SlotVisitor::finalizeUnconditionalFinalizers()
330 {
331 StackStats::probe();
332 while (m_shared.m_unconditionalFinalizers.hasNext())
333 m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
334 }
335
336 #if ENABLE(GC_VALIDATION)
337 void SlotVisitor::validate(JSCell* cell)
338 {
339 RELEASE_ASSERT(cell);
340
341 if (!cell->structure()) {
342 dataLogF("cell at %p has a null structure\n" , cell);
343 CRASH();
344 }
345
346 // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
347 // I hate this sentence.
348 if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
349 const char* parentClassName = 0;
350 const char* ourClassName = 0;
351 if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
352 parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
353 if (cell->structure()->JSCell::classInfo())
354 ourClassName = cell->structure()->JSCell::classInfo()->className;
355 dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
356 cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
357 CRASH();
358 }
359
360 // Make sure we can walk the ClassInfo chain
361 const ClassInfo* info = cell->classInfo();
362 do { } while ((info = info->parentClass));
363 }
364 #else
365 void SlotVisitor::validate(JSCell*)
366 {
367 }
368 #endif
369
370 } // namespace JSC