/*
 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGWorklist.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DeferGC.h"
#include "DFGLongLivedState.h"
#include "DFGSafepoint.h"
#include "JSCInlines.h"
38 namespace JSC
{ namespace DFG
{
40 Worklist::Worklist(CString worklistName
)
41 : m_threadName(toCString(worklistName
, " Worker Thread"))
42 , m_numberOfActiveThreads(0)
49 MutexLocker
locker(m_lock
);
50 for (unsigned i
= m_threads
.size(); i
--;)
51 m_queue
.append(nullptr); // Use null plan to indicate that we want the thread to terminate.
52 m_planEnqueued
.broadcast();
54 for (unsigned i
= m_threads
.size(); i
--;)
55 waitForThreadCompletion(m_threads
[i
]->m_identifier
);
56 ASSERT(!m_numberOfActiveThreads
);
59 void Worklist::finishCreation(unsigned numberOfThreads
, int relativePriority
)
61 RELEASE_ASSERT(numberOfThreads
);
62 for (unsigned i
= numberOfThreads
; i
--;) {
63 std::unique_ptr
<ThreadData
> data
= std::make_unique
<ThreadData
>(this);
64 data
->m_identifier
= createThread(threadFunction
, data
.get(), m_threadName
.data());
66 changeThreadPriority(data
->m_identifier
, relativePriority
);
67 m_threads
.append(WTF::move(data
));
71 PassRefPtr
<Worklist
> Worklist::create(CString worklistName
, unsigned numberOfThreads
, int relativePriority
)
73 RefPtr
<Worklist
> result
= adoptRef(new Worklist(worklistName
));
74 result
->finishCreation(numberOfThreads
, relativePriority
);
78 bool Worklist::isActiveForVM(VM
& vm
) const
80 MutexLocker
locker(m_lock
);
81 PlanMap::const_iterator end
= m_plans
.end();
82 for (PlanMap::const_iterator iter
= m_plans
.begin(); iter
!= end
; ++iter
) {
83 if (&iter
->value
->vm
== &vm
)
89 void Worklist::enqueue(PassRefPtr
<Plan
> passedPlan
)
91 RefPtr
<Plan
> plan
= passedPlan
;
92 MutexLocker
locker(m_lock
);
93 if (Options::verboseCompilationQueue()) {
94 dump(locker
, WTF::dataFile());
95 dataLog(": Enqueueing plan to optimize ", plan
->key(), "\n");
97 ASSERT(m_plans
.find(plan
->key()) == m_plans
.end());
98 m_plans
.add(plan
->key(), plan
);
100 m_planEnqueued
.signal();
103 Worklist::State
Worklist::compilationState(CompilationKey key
)
105 MutexLocker
locker(m_lock
);
106 PlanMap::iterator iter
= m_plans
.find(key
);
107 if (iter
== m_plans
.end())
109 return iter
->value
->stage
== Plan::Ready
? Compiled
: Compiling
;
112 void Worklist::waitUntilAllPlansForVMAreReady(VM
& vm
)
114 DeferGC
deferGC(vm
.heap
);
115 // Wait for all of the plans for the given VM to complete. The idea here
116 // is that we want all of the caller VM's plans to be done. We don't care
117 // about any other VM's plans, and we won't attempt to wait on those.
118 // After we release this lock, we know that although other VMs may still
119 // be adding plans, our VM will not be.
121 MutexLocker
locker(m_lock
);
123 if (Options::verboseCompilationQueue()) {
124 dump(locker
, WTF::dataFile());
125 dataLog(": Waiting for all in VM to complete.\n");
129 bool allAreCompiled
= true;
130 PlanMap::iterator end
= m_plans
.end();
131 for (PlanMap::iterator iter
= m_plans
.begin(); iter
!= end
; ++iter
) {
132 if (&iter
->value
->vm
!= &vm
)
134 if (iter
->value
->stage
!= Plan::Ready
) {
135 allAreCompiled
= false;
143 m_planCompiled
.wait(m_lock
);
147 void Worklist::removeAllReadyPlansForVM(VM
& vm
, Vector
<RefPtr
<Plan
>, 8>& myReadyPlans
)
149 DeferGC
deferGC(vm
.heap
);
150 MutexLocker
locker(m_lock
);
151 for (size_t i
= 0; i
< m_readyPlans
.size(); ++i
) {
152 RefPtr
<Plan
> plan
= m_readyPlans
[i
];
153 if (&plan
->vm
!= &vm
)
155 if (plan
->stage
!= Plan::Ready
)
157 myReadyPlans
.append(plan
);
158 m_readyPlans
[i
--] = m_readyPlans
.last();
159 m_readyPlans
.removeLast();
160 m_plans
.remove(plan
->key());
164 void Worklist::removeAllReadyPlansForVM(VM
& vm
)
166 Vector
<RefPtr
<Plan
>, 8> myReadyPlans
;
167 removeAllReadyPlansForVM(vm
, myReadyPlans
);
170 Worklist::State
Worklist::completeAllReadyPlansForVM(VM
& vm
, CompilationKey requestedKey
)
172 DeferGC
deferGC(vm
.heap
);
173 Vector
<RefPtr
<Plan
>, 8> myReadyPlans
;
175 removeAllReadyPlansForVM(vm
, myReadyPlans
);
177 State resultingState
= NotKnown
;
179 while (!myReadyPlans
.isEmpty()) {
180 RefPtr
<Plan
> plan
= myReadyPlans
.takeLast();
181 CompilationKey currentKey
= plan
->key();
183 if (Options::verboseCompilationQueue())
184 dataLog(*this, ": Completing ", currentKey
, "\n");
186 RELEASE_ASSERT(plan
->stage
== Plan::Ready
);
188 plan
->finalizeAndNotifyCallback();
190 if (currentKey
== requestedKey
)
191 resultingState
= Compiled
;
194 if (!!requestedKey
&& resultingState
== NotKnown
) {
195 MutexLocker
locker(m_lock
);
196 if (m_plans
.contains(requestedKey
))
197 resultingState
= Compiling
;
200 return resultingState
;
203 void Worklist::completeAllPlansForVM(VM
& vm
)
205 DeferGC
deferGC(vm
.heap
);
206 waitUntilAllPlansForVMAreReady(vm
);
207 completeAllReadyPlansForVM(vm
);
210 void Worklist::suspendAllThreads()
212 m_suspensionLock
.lock();
213 for (unsigned i
= m_threads
.size(); i
--;)
214 m_threads
[i
]->m_rightToRun
.lock();
217 void Worklist::resumeAllThreads()
219 for (unsigned i
= m_threads
.size(); i
--;)
220 m_threads
[i
]->m_rightToRun
.unlock();
221 m_suspensionLock
.unlock();
224 void Worklist::visitWeakReferences(SlotVisitor
& visitor
, CodeBlockSet
& codeBlocks
)
226 VM
* vm
= visitor
.heap()->vm();
228 MutexLocker
locker(m_lock
);
229 for (PlanMap::iterator iter
= m_plans
.begin(); iter
!= m_plans
.end(); ++iter
) {
230 Plan
* plan
= iter
->value
.get();
233 iter
->value
->checkLivenessAndVisitChildren(visitor
, codeBlocks
);
236 // This loop doesn't need locking because:
237 // (1) no new threads can be added to m_threads. Hence, it is immutable and needs no locks.
238 // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be
239 // holding here because of a prior call to suspendAllThreads().
240 for (unsigned i
= m_threads
.size(); i
--;) {
241 ThreadData
* data
= m_threads
[i
].get();
242 Safepoint
* safepoint
= data
->m_safepoint
;
243 if (safepoint
&& &safepoint
->vm() == vm
)
244 safepoint
->checkLivenessAndVisitChildren(visitor
);
248 void Worklist::removeDeadPlans(VM
& vm
)
251 MutexLocker
locker(m_lock
);
252 HashSet
<CompilationKey
> deadPlanKeys
;
253 for (PlanMap::iterator iter
= m_plans
.begin(); iter
!= m_plans
.end(); ++iter
) {
254 Plan
* plan
= iter
->value
.get();
255 if (&plan
->vm
!= &vm
)
257 if (plan
->isKnownToBeLiveDuringGC())
259 RELEASE_ASSERT(plan
->stage
!= Plan::Cancelled
); // Should not be cancelled, yet.
260 ASSERT(!deadPlanKeys
.contains(plan
->key()));
261 deadPlanKeys
.add(plan
->key());
263 if (!deadPlanKeys
.isEmpty()) {
264 for (HashSet
<CompilationKey
>::iterator iter
= deadPlanKeys
.begin(); iter
!= deadPlanKeys
.end(); ++iter
)
265 m_plans
.take(*iter
)->cancel();
266 Deque
<RefPtr
<Plan
>> newQueue
;
267 while (!m_queue
.isEmpty()) {
268 RefPtr
<Plan
> plan
= m_queue
.takeFirst();
269 if (plan
->stage
!= Plan::Cancelled
)
270 newQueue
.append(plan
);
272 m_queue
.swap(newQueue
);
273 for (unsigned i
= 0; i
< m_readyPlans
.size(); ++i
) {
274 if (m_readyPlans
[i
]->stage
!= Plan::Cancelled
)
276 m_readyPlans
[i
] = m_readyPlans
.last();
277 m_readyPlans
.removeLast();
282 // No locking needed for this part, see comment in visitWeakReferences().
283 for (unsigned i
= m_threads
.size(); i
--;) {
284 ThreadData
* data
= m_threads
[i
].get();
285 Safepoint
* safepoint
= data
->m_safepoint
;
288 if (&safepoint
->vm() != &vm
)
290 if (safepoint
->isKnownToBeLiveDuringGC())
296 size_t Worklist::queueLength()
298 MutexLocker
locker(m_lock
);
299 return m_queue
.size();
302 void Worklist::dump(PrintStream
& out
) const
304 MutexLocker
locker(m_lock
);
308 void Worklist::dump(const MutexLocker
&, PrintStream
& out
) const
311 "Worklist(", RawPointer(this), ")[Queue Length = ", m_queue
.size(),
312 ", Map Size = ", m_plans
.size(), ", Num Ready = ", m_readyPlans
.size(),
313 ", Num Active Threads = ", m_numberOfActiveThreads
, "/", m_threads
.size(), "]");
316 void Worklist::runThread(ThreadData
* data
)
318 CompilationScope compilationScope
;
320 if (Options::verboseCompilationQueue())
321 dataLog(*this, ": Thread started\n");
323 LongLivedState longLivedState
;
328 MutexLocker
locker(m_lock
);
329 while (m_queue
.isEmpty())
330 m_planEnqueued
.wait(m_lock
);
332 plan
= m_queue
.takeFirst();
334 m_numberOfActiveThreads
++;
338 if (Options::verboseCompilationQueue())
339 dataLog(*this, ": Thread shutting down\n");
344 MutexLocker
locker(data
->m_rightToRun
);
346 MutexLocker
locker(m_lock
);
347 if (plan
->stage
== Plan::Cancelled
) {
348 m_numberOfActiveThreads
--;
351 plan
->notifyCompiling();
354 if (Options::verboseCompilationQueue())
355 dataLog(*this, ": Compiling ", plan
->key(), " asynchronously\n");
357 RELEASE_ASSERT(!plan
->vm
.heap
.isCollecting());
358 plan
->compileInThread(longLivedState
, data
);
359 RELEASE_ASSERT(!plan
->vm
.heap
.isCollecting());
362 MutexLocker
locker(m_lock
);
363 if (plan
->stage
== Plan::Cancelled
) {
364 m_numberOfActiveThreads
--;
367 plan
->notifyCompiled();
369 RELEASE_ASSERT(!plan
->vm
.heap
.isCollecting());
373 MutexLocker
locker(m_lock
);
375 // We could have been cancelled between releasing rightToRun and acquiring m_lock.
376 // This would mean that we might be in the middle of GC right now.
377 if (plan
->stage
== Plan::Cancelled
) {
378 m_numberOfActiveThreads
--;
384 if (Options::verboseCompilationQueue()) {
385 dump(locker
, WTF::dataFile());
386 dataLog(": Compiled ", plan
->key(), " asynchronously\n");
389 m_readyPlans
.append(plan
);
391 m_planCompiled
.broadcast();
392 m_numberOfActiveThreads
--;
397 void Worklist::threadFunction(void* argument
)
399 ThreadData
* data
= static_cast<ThreadData
*>(argument
);
400 data
->m_worklist
->runThread(data
);
403 static Worklist
* theGlobalDFGWorklist
;
405 Worklist
* ensureGlobalDFGWorklist()
407 static std::once_flag initializeGlobalWorklistOnceFlag
;
408 std::call_once(initializeGlobalWorklistOnceFlag
, [] {
409 theGlobalDFGWorklist
= Worklist::create("DFG Worklist", Options::numberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef();
411 return theGlobalDFGWorklist
;
414 Worklist
* existingGlobalDFGWorklistOrNull()
416 return theGlobalDFGWorklist
;
419 static Worklist
* theGlobalFTLWorklist
;
421 Worklist
* ensureGlobalFTLWorklist()
423 static std::once_flag initializeGlobalWorklistOnceFlag
;
424 std::call_once(initializeGlobalWorklistOnceFlag
, [] {
425 theGlobalFTLWorklist
= Worklist::create("FTL Worklist", Options::numberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef();
427 return theGlobalFTLWorklist
;
430 Worklist
* existingGlobalFTLWorklistOrNull()
432 return theGlobalFTLWorklist
;
435 Worklist
* ensureGlobalWorklistFor(CompilationMode mode
)
438 case InvalidCompilationMode
:
439 RELEASE_ASSERT_NOT_REACHED();
442 return ensureGlobalDFGWorklist();
444 case FTLForOSREntryMode
:
445 return ensureGlobalFTLWorklist();
447 RELEASE_ASSERT_NOT_REACHED();
451 } } // namespace JSC::DFG
453 #endif // ENABLE(DFG_JIT)