/*
 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGWorklist.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DeferGC.h"
#include "DFGLongLivedState.h"
#include "DFGSafepoint.h"
#include "JSCInlines.h"
#include <mutex>

namespace JSC { namespace DFG {

Worklist::Worklist(CString worklistName)
    : m_threadName(toCString(worklistName, " Worker Thread"))
    , m_numberOfActiveThreads(0)
{
}

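// Tearing down the worklist enqueues one null plan per worker thread; a null
// plan is the signal for a worker to shut down (see runThread()). We then
// join each thread before returning.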
Worklist::~Worklist()
{
    {
        MutexLocker locker(m_lock);
        for (unsigned i = m_threads.size(); i--;)
            m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate.
        m_planEnqueued.broadcast();
    }
    for (unsigned i = m_threads.size(); i--;)
        waitForThreadCompletion(m_threads[i]->m_identifier);
    ASSERT(!m_numberOfActiveThreads);
}

void Worklist::finishCreation(unsigned numberOfThreads, int relativePriority)
{
    RELEASE_ASSERT(numberOfThreads);
    for (unsigned i = numberOfThreads; i--;) {
        std::unique_ptr<ThreadData> data = std::make_unique<ThreadData>(this);
        data->m_identifier = createThread(threadFunction, data.get(), m_threadName.data());
        if (relativePriority)
            changeThreadPriority(data->m_identifier, relativePriority);
        m_threads.append(WTF::move(data));
    }
}

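// Creation is two-phase: the constructor only initializes fields, and
// finishCreation() spawns the worker threads once the new Worklist is owned
// by a Ref, so a worker can never observe a half-constructed object.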
Ref<Worklist> Worklist::create(CString worklistName, unsigned numberOfThreads, int relativePriority)
{
    Ref<Worklist> result = adoptRef(*new Worklist(worklistName));
    result->finishCreation(numberOfThreads, relativePriority);
    return result;
}

bool Worklist::isActiveForVM(VM& vm) const
{
    MutexLocker locker(m_lock);
    PlanMap::const_iterator end = m_plans.end();
    for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
        if (&iter->value->vm == &vm)
            return true;
    }
    return false;
}

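// A plan is tracked in two structures: m_plans, keyed by CompilationKey for
// lookup, and m_queue, the FIFO that worker threads drain. signal() wakes a
// single waiting worker, since exactly one plan was added.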
void Worklist::enqueue(PassRefPtr<Plan> passedPlan)
{
    RefPtr<Plan> plan = passedPlan;
    MutexLocker locker(m_lock);
    if (Options::verboseCompilationQueue()) {
        dump(locker, WTF::dataFile());
        dataLog(": Enqueueing plan to optimize ", plan->key(), "\n");
    }
    ASSERT(m_plans.find(plan->key()) == m_plans.end());
    m_plans.add(plan->key(), plan);
    m_queue.append(plan);
    m_planEnqueued.signal();
}

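// Maps a plan's internal stage onto the externally visible state: an unknown
// key is NotKnown, a Ready plan is Compiled, and anything else is still
// Compiling.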
Worklist::State Worklist::compilationState(CompilationKey key)
{
    MutexLocker locker(m_lock);
    PlanMap::iterator iter = m_plans.find(key);
    if (iter == m_plans.end())
        return NotKnown;
    return iter->value->stage == Plan::Ready ? Compiled : Compiling;
}

void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
{
    DeferGC deferGC(vm.heap);
    // Wait for all of the plans for the given VM to complete. The idea here
    // is that we want all of the caller VM's plans to be done. We don't care
    // about any other VM's plans, and we won't attempt to wait on those.
    // After we release this lock, we know that although other VMs may still
    // be adding plans, our VM will not be.

    MutexLocker locker(m_lock);

    if (Options::verboseCompilationQueue()) {
        dump(locker, WTF::dataFile());
        dataLog(": Waiting for all in VM to complete.\n");
    }

    for (;;) {
        bool allAreCompiled = true;
        PlanMap::iterator end = m_plans.end();
        for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
            if (&iter->value->vm != &vm)
                continue;
            if (iter->value->stage != Plan::Ready) {
                allAreCompiled = false;
                break;
            }
        }

        if (allAreCompiled)
            break;

        m_planCompiled.wait(m_lock);
    }
}

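// Pull every Ready plan belonging to this VM out of m_readyPlans and m_plans
// while holding the lock, handing them to the caller. Finalization happens on
// the caller's side, outside m_lock (see completeAllReadyPlansForVM()).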
void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans)
{
    DeferGC deferGC(vm.heap);
    MutexLocker locker(m_lock);
    for (size_t i = 0; i < m_readyPlans.size(); ++i) {
        RefPtr<Plan> plan = m_readyPlans[i];
        if (&plan->vm != &vm)
            continue;
        if (plan->stage != Plan::Ready)
            continue;
        myReadyPlans.append(plan);
        // Unordered removal: overwrite this slot with the last element and
        // revisit the same index on the next iteration.
        m_readyPlans[i--] = m_readyPlans.last();
        m_readyPlans.removeLast();
        m_plans.remove(plan->key());
    }
}

void Worklist::removeAllReadyPlansForVM(VM& vm)
{
    Vector<RefPtr<Plan>, 8> myReadyPlans;
    removeAllReadyPlansForVM(vm, myReadyPlans);
}

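// Finalize every ready plan for the given VM on the caller's thread. If
// requestedKey was among them we report Compiled; if it is still in m_plans
// it is Compiling; otherwise its state is NotKnown.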
Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requestedKey)
{
    DeferGC deferGC(vm.heap);
    Vector<RefPtr<Plan>, 8> myReadyPlans;

    removeAllReadyPlansForVM(vm, myReadyPlans);

    State resultingState = NotKnown;

    while (!myReadyPlans.isEmpty()) {
        RefPtr<Plan> plan = myReadyPlans.takeLast();
        CompilationKey currentKey = plan->key();

        if (Options::verboseCompilationQueue())
            dataLog(*this, ": Completing ", currentKey, "\n");

        RELEASE_ASSERT(plan->stage == Plan::Ready);

        plan->finalizeAndNotifyCallback();

        if (currentKey == requestedKey)
            resultingState = Compiled;
    }

    if (!!requestedKey && resultingState == NotKnown) {
        MutexLocker locker(m_lock);
        if (m_plans.contains(requestedKey))
            resultingState = Compiling;
    }

    return resultingState;
}

void Worklist::completeAllPlansForVM(VM& vm)
{
    DeferGC deferGC(vm.heap);
    waitUntilAllPlansForVMAreReady(vm);
    completeAllReadyPlansForVM(vm);
}

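// Suspension acquires every worker's m_rightToRun mutex while holding
// m_suspensionLock (which serializes suspenders). A worker holds its
// m_rightToRun while compiling, so once we own all of them, no worker can
// make progress until resumeAllThreads() releases the locks again.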
void Worklist::suspendAllThreads()
{
    m_suspensionLock.lock();
    for (unsigned i = m_threads.size(); i--;)
        m_threads[i]->m_rightToRun.lock();
}

void Worklist::resumeAllThreads()
{
    for (unsigned i = m_threads.size(); i--;)
        m_threads[i]->m_rightToRun.unlock();
    m_suspensionLock.unlock();
}

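// Called by the GC while all worker threads are suspended: visit everything
// that this VM's queued plans and active safepoints keep alive.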
void Worklist::visitWeakReferences(SlotVisitor& visitor, CodeBlockSet& codeBlocks)
{
    VM* vm = visitor.heap()->vm();
    {
        MutexLocker locker(m_lock);
        for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
            Plan* plan = iter->value.get();
            if (&plan->vm != vm)
                continue;
            iter->value->checkLivenessAndVisitChildren(visitor, codeBlocks);
        }
    }
    // This loop doesn't need locking because:
    // (1) no new threads can be added to m_threads. Hence, it is immutable and needs no locks.
    // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be
    //     holding here because of a prior call to suspendAllThreads().
    for (unsigned i = m_threads.size(); i--;) {
        ThreadData* data = m_threads[i].get();
        Safepoint* safepoint = data->m_safepoint;
        if (safepoint && &safepoint->vm() == vm)
            safepoint->checkLivenessAndVisitChildren(visitor);
    }
}

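// Also called during GC: cancel every plan for this VM that the GC did not
// prove live, purge the cancelled plans from m_queue and m_readyPlans, and
// cancel any safepointed compilation whose code is dead.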
void Worklist::removeDeadPlans(VM& vm)
{
    {
        MutexLocker locker(m_lock);
        HashSet<CompilationKey> deadPlanKeys;
        for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
            Plan* plan = iter->value.get();
            if (&plan->vm != &vm)
                continue;
            if (plan->isKnownToBeLiveDuringGC())
                continue;
            RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet.
            ASSERT(!deadPlanKeys.contains(plan->key()));
            deadPlanKeys.add(plan->key());
        }
        if (!deadPlanKeys.isEmpty()) {
            for (HashSet<CompilationKey>::iterator iter = deadPlanKeys.begin(); iter != deadPlanKeys.end(); ++iter)
                m_plans.take(*iter)->cancel();
            Deque<RefPtr<Plan>> newQueue;
            while (!m_queue.isEmpty()) {
                RefPtr<Plan> plan = m_queue.takeFirst();
                if (plan->stage != Plan::Cancelled)
                    newQueue.append(plan);
            }
            m_queue.swap(newQueue);
            for (unsigned i = 0; i < m_readyPlans.size(); ++i) {
                if (m_readyPlans[i]->stage != Plan::Cancelled)
                    continue;
                // Unordered removal, revisiting the swapped-in slot (same
                // idiom as removeAllReadyPlansForVM() above).
                m_readyPlans[i--] = m_readyPlans.last();
                m_readyPlans.removeLast();
            }
        }
    }

    // No locking needed for this part, see comment in visitWeakReferences().
    for (unsigned i = m_threads.size(); i--;) {
        ThreadData* data = m_threads[i].get();
        Safepoint* safepoint = data->m_safepoint;
        if (!safepoint)
            continue;
        if (&safepoint->vm() != &vm)
            continue;
        if (safepoint->isKnownToBeLiveDuringGC())
            continue;
        safepoint->cancel();
    }
}

size_t Worklist::queueLength()
{
    MutexLocker locker(m_lock);
    return m_queue.size();
}

void Worklist::dump(PrintStream& out) const
{
    MutexLocker locker(m_lock);
    dump(locker, out);
}

void Worklist::dump(const MutexLocker&, PrintStream& out) const
{
    out.print(
        "Worklist(", RawPointer(this), ")[Queue Length = ", m_queue.size(),
        ", Map Size = ", m_plans.size(), ", Num Ready = ", m_readyPlans.size(),
        ", Num Active Threads = ", m_numberOfActiveThreads, "/", m_threads.size(), "]");
}

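// The worker thread main loop: block until a plan is enqueued, compile it
// while holding this thread's m_rightToRun (which suspendAllThreads() uses to
// stop us), then publish the finished plan to m_readyPlans. A null plan is
// the shutdown request sent by ~Worklist().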
void Worklist::runThread(ThreadData* data)
{
    CompilationScope compilationScope;

    if (Options::verboseCompilationQueue())
        dataLog(*this, ": Thread started\n");

    LongLivedState longLivedState;

    for (;;) {
        RefPtr<Plan> plan;
        {
            MutexLocker locker(m_lock);
            while (m_queue.isEmpty())
                m_planEnqueued.wait(m_lock);

            plan = m_queue.takeFirst();
            if (plan)
                m_numberOfActiveThreads++;
        }

        if (!plan) {
            if (Options::verboseCompilationQueue())
                dataLog(*this, ": Thread shutting down\n");
            return;
        }

        {
            MutexLocker locker(data->m_rightToRun);
            {
                MutexLocker locker(m_lock);
                // The GC may have cancelled this plan between the time we
                // dequeued it and the time we acquired our right to run.
                if (plan->stage == Plan::Cancelled) {
                    m_numberOfActiveThreads--;
                    continue;
                }
                plan->notifyCompiling();
            }

            if (Options::verboseCompilationQueue())
                dataLog(*this, ": Compiling ", plan->key(), " asynchronously\n");

            RELEASE_ASSERT(!plan->vm.heap.isCollecting());
            plan->compileInThread(longLivedState, data);
            RELEASE_ASSERT(!plan->vm.heap.isCollecting());

            {
                MutexLocker locker(m_lock);
                // Likewise, the plan may have been cancelled at a safepoint
                // during compilation.
                if (plan->stage == Plan::Cancelled) {
                    m_numberOfActiveThreads--;
                    continue;
                }
                plan->notifyCompiled();
            }
            RELEASE_ASSERT(!plan->vm.heap.isCollecting());
        }

        {
            MutexLocker locker(m_lock);

            // We could have been cancelled between releasing rightToRun and acquiring m_lock.
            // This would mean that we might be in the middle of GC right now.
            if (plan->stage == Plan::Cancelled) {
                m_numberOfActiveThreads--;
                continue;
            }

            plan->notifyReady();

            if (Options::verboseCompilationQueue()) {
                dump(locker, WTF::dataFile());
                dataLog(": Compiled ", plan->key(), " asynchronously\n");
            }

            m_readyPlans.append(plan);

            m_planCompiled.broadcast();
            m_numberOfActiveThreads--;
        }
    }
}

void Worklist::threadFunction(void* argument)
{
    ThreadData* data = static_cast<ThreadData*>(argument);
    data->m_worklist->runThread(data);
}

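// The global DFG and FTL worklists are created lazily with std::call_once and
// deliberately leaked (leakRef()): they live for the lifetime of the process.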
static Worklist* theGlobalDFGWorklist;

Worklist* ensureGlobalDFGWorklist()
{
    static std::once_flag initializeGlobalWorklistOnceFlag;
    std::call_once(initializeGlobalWorklistOnceFlag, [] {
        theGlobalDFGWorklist = &Worklist::create("DFG Worklist", Options::numberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef();
    });
    return theGlobalDFGWorklist;
}

Worklist* existingGlobalDFGWorklistOrNull()
{
    return theGlobalDFGWorklist;
}

static Worklist* theGlobalFTLWorklist;

Worklist* ensureGlobalFTLWorklist()
{
    static std::once_flag initializeGlobalWorklistOnceFlag;
    std::call_once(initializeGlobalWorklistOnceFlag, [] {
        theGlobalFTLWorklist = &Worklist::create("FTL Worklist", Options::numberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef();
    });
    return theGlobalFTLWorklist;
}

Worklist* existingGlobalFTLWorklistOrNull()
{
    return theGlobalFTLWorklist;
}

Worklist* ensureGlobalWorklistFor(CompilationMode mode)
{
    switch (mode) {
    case InvalidCompilationMode:
        RELEASE_ASSERT_NOT_REACHED();
        return 0;
    case DFGMode:
        return ensureGlobalDFGWorklist();
    case FTLMode:
    case FTLForOSREntryMode:
        return ensureGlobalFTLWorklist();
    }
    RELEASE_ASSERT_NOT_REACHED();
    return 0;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)