/*
 * Copyright (c) 1998-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <IOKit/IOWorkLoop.h>
#include <IOKit/IOEventSource.h>
#include <IOKit/IOInterruptEventSource.h>
#include <IOKit/IOCommandGate.h>
#include <IOKit/IOCommandPool.h>
#include <IOKit/IOTimeStamp.h>
#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <kern/thread.h>

#define super OSObject

OSDefineMetaClassAndStructors(IOWorkLoop, OSObject);

// Block of unused functions intended for future use
#if __LP64__
OSMetaClassDefineReservedUnused(IOWorkLoop, 0);
OSMetaClassDefineReservedUnused(IOWorkLoop, 1);
OSMetaClassDefineReservedUnused(IOWorkLoop, 2);
#else
OSMetaClassDefineReservedUsedX86(IOWorkLoop, 0);
OSMetaClassDefineReservedUsedX86(IOWorkLoop, 1);
OSMetaClassDefineReservedUsedX86(IOWorkLoop, 2);
#endif
OSMetaClassDefineReservedUnused(IOWorkLoop, 3);
OSMetaClassDefineReservedUnused(IOWorkLoop, 4);
OSMetaClassDefineReservedUnused(IOWorkLoop, 5);
OSMetaClassDefineReservedUnused(IOWorkLoop, 6);
OSMetaClassDefineReservedUnused(IOWorkLoop, 7);

enum IOWorkLoopState { kLoopRestart = 0x1, kLoopTerminate = 0x2 };
static inline void
SETP(void *addr, unsigned int flag)
{
	unsigned char *num = (unsigned char *) addr; *num |= flag;
}
static inline void
CLRP(void *addr, unsigned int flag)
{
	unsigned char *num = (unsigned char *) addr; *num &= ~flag;
}
static inline bool
ISSETP(void *addr, unsigned int flag)
{
	unsigned char *num = (unsigned char *) addr; return (*num & flag) != 0;
}

#define fFlags loopRestart

#define passiveEventChain reserved->passiveEventChain

#if IOKITSTATS

#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerWorkLoop(this); \
} while(0)

#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
		IOStatistics::unregisterWorkLoop(reserved->counter); \
} while(0)

#define IOStatisticsOpenGate() \
do { \
	IOStatistics::countWorkLoopOpenGate(reserved->counter); \
	if (reserved->lockInterval) lockTime(); \
} while(0)
#define IOStatisticsCloseGate() \
do { \
	IOStatistics::countWorkLoopCloseGate(reserved->counter); \
	if (reserved->lockInterval) reserved->lockTime = mach_absolute_time(); \
} while(0)

#define IOStatisticsAttachEventSource() \
do { \
	IOStatistics::attachWorkLoopEventSource(reserved->counter, inEvent->reserved->counter); \
} while(0)

#define IOStatisticsDetachEventSource() \
do { \
	IOStatistics::detachWorkLoopEventSource(reserved->counter, inEvent->reserved->counter); \
} while(0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsOpenGate()
#define IOStatisticsCloseGate()
#define IOStatisticsAttachEventSource()
#define IOStatisticsDetachEventSource()

#endif /* IOKITSTATS */

bool
IOWorkLoop::init()
{
	// The super init and gateLock allocation MUST be done first.
	if (!super::init()) {
		return false;
	}

	// Allocate our ExpansionData if it hasn't been allocated already.
	if (!reserved) {
		reserved = IONew(ExpansionData, 1);
		if (!reserved) {
			return false;
		}

		bzero(reserved, sizeof(ExpansionData));
	}

	if (gateLock == NULL) {
		if (!(gateLock = IORecursiveLockAlloc())) {
			return false;
		}
	}

	if (workToDoLock == NULL) {
		if (!(workToDoLock = IOSimpleLockAlloc())) {
			return false;
		}
		IOSimpleLockInit(workToDoLock);
		workToDo = false;
	}

	IOStatisticsRegisterCounter();

	if (controlG == NULL) {
		controlG = IOCommandGate::commandGate(
			this,
			OSMemberFunctionCast(
				IOCommandGate::Action,
				this,
				&IOWorkLoop::_maintRequest));

		if (!controlG) {
			return false;
		}
		// Point the controlGate at the workLoop. Usually addEventSource
		// does this automatically, but in this case addEventSource itself
		// uses the control gate, so the gate has to be bootstrapped here.
		controlG->setWorkLoop(this);
		if (addEventSource(controlG) != kIOReturnSuccess) {
			return false;
		}
	}

	if (workThread == NULL) {
		thread_continue_t cptr = OSMemberFunctionCast(
			thread_continue_t,
			this,
			&IOWorkLoop::threadMain);
		if (KERN_SUCCESS != kernel_thread_start(cptr, this, &workThread)) {
			return false;
		}
	}

	(void) thread_set_tag(workThread, THREAD_TAG_IOWORKLOOP);
	return true;
}

IOWorkLoop *
IOWorkLoop::workLoop()
{
	return IOWorkLoop::workLoopWithOptions(0);
}

IOWorkLoop *
IOWorkLoop::workLoopWithOptions(IOOptionBits options)
{
	IOWorkLoop *me = new IOWorkLoop;

	if (me && options) {
		me->reserved = IONew(ExpansionData, 1);
		if (!me->reserved) {
			me->release();
			return NULL;
		}
		bzero(me->reserved, sizeof(ExpansionData));
		me->reserved->options = options;
	}

	if (me && !me->init()) {
		me->release();
		return NULL;
	}

	return me;
}
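
/*
 * Illustrative driver-side usage of the factories above (comment only, not
 * part of this translation unit). com_example_Driver, fWorkLoop, fInterruptES
 * and interruptOccurred are hypothetical names; workLoopWithOptions() would
 * be used instead of workLoop() when an option such as kPreciousStack is
 * needed.
 *
 *	bool com_example_Driver::start(IOService *provider)
 *	{
 *		if (!super::start(provider)) {
 *			return false;
 *		}
 *		fWorkLoop = IOWorkLoop::workLoop();     // new loop, thread and gate
 *		if (!fWorkLoop) {
 *			return false;
 *		}
 *		fInterruptES = IOInterruptEventSource::interruptEventSource(this,
 *		    OSMemberFunctionCast(IOInterruptEventSource::Action, this,
 *		    &com_example_Driver::interruptOccurred), provider, 0);
 *		if (!fInterruptES ||
 *		    fWorkLoop->addEventSource(fInterruptES) != kIOReturnSuccess) {
 *			return false;
 *		}
 *		return true;
 *	}
 */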

void
IOWorkLoop::releaseEventChain(LIBKERN_CONSUMED IOEventSource *eventChain)
{
	IOEventSource *event, *next;
	for (event = eventChain; event; event = next) {
		next = event->getNext();
#ifdef __clang_analyzer__
		// Unlike the usual IOKit memory management convention, IOWorkLoop
		// manages the retain count for the IOEventSource instances in
		// the chain rather than have IOEventSource do that itself. This means
		// it is safe to call release() on the result of getNext() while the
		// chain is being torn down. However, the analyzer doesn't
		// realize this. We add an extra retain under analysis to suppress
		// an analyzer diagnostic about violations of the memory management rules.
		if (next) {
			next->retain();
		}
#endif
		event->setWorkLoop(NULL);
		event->setNext(NULL);
		event->release();
	}
}
// Free is called twice:
// First when the atomic retainCount transitions from 1 -> 0
// Secondly when the work loop itself is committing hara-kiri
// Hence each leg of the free must be single threaded.
void
IOWorkLoop::free()
{
	if (workThread) {
		IOInterruptState is;

		// If we are here then we must be trying to shut down this work loop.
		// In this case disable all of the event sources, mark the loop
		// as terminating, wake up the work thread itself, and return.
		// Note: we hold the gate across the entire operation mainly for the
		// benefit of our event sources, so we can disable them cleanly.
		closeGate();

		disableAllEventSources();

		is = IOSimpleLockLockDisableInterrupt(workToDoLock);
		SETP(&fFlags, kLoopTerminate);
		thread_wakeup_thread((void *) &workToDo, workThread);
		IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);

		openGate();
	} else { /* !workThread */
		releaseEventChain(eventChain);
		eventChain = NULL;

		releaseEventChain(passiveEventChain);
		passiveEventChain = NULL;

		// Either we have a partial initialization to clean up
		// or the workThread itself is performing hara-kiri.
		// Either way clean up all of our resources and return.

		if (controlG) {
			controlG->workLoop = NULL;
			controlG->release();
			controlG = NULL;
		}

		if (workToDoLock) {
			IOSimpleLockFree(workToDoLock);
			workToDoLock = NULL;
		}

		if (gateLock) {
			IORecursiveLockFree(gateLock);
			gateLock = NULL;
		}

		IOStatisticsUnregisterCounter();

		if (reserved) {
			IODelete(reserved, ExpansionData, 1);
			reserved = NULL;
		}

		super::free();
	}
}

IOReturn
IOWorkLoop::addEventSource(IOEventSource *newEvent)
{
	if ((workThread)
	    && !thread_has_thread_name(workThread)
	    && (newEvent->owner)
	    && !OSDynamicCast(IOCommandPool, newEvent->owner)) {
		thread_set_thread_name(workThread, newEvent->owner->getMetaClass()->getClassName());
	}

	return controlG->runCommand((void *) mAddEvent, (void *) newEvent);
}

IOReturn
IOWorkLoop::removeEventSource(IOEventSource *toRemove)
{
	return controlG->runCommand((void *) mRemoveEvent, (void *) toRemove);
}
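
/*
 * Illustrative teardown counterpart to the start() sketch above (comment
 * only; the names are the same hypothetical ones). Event sources are removed
 * from the loop before the driver drops its references, so the chain walk in
 * _maintRequest below unlinks them cleanly.
 *
 *	void com_example_Driver::stop(IOService *provider)
 *	{
 *		if (fWorkLoop && fInterruptES) {
 *			fWorkLoop->removeEventSource(fInterruptES);
 *		}
 *		OSSafeReleaseNULL(fInterruptES);
 *		OSSafeReleaseNULL(fWorkLoop);
 *		super::stop(provider);
 *	}
 */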

void
IOWorkLoop::enableAllEventSources() const
{
	IOEventSource *event;

	for (event = eventChain; event; event = event->getNext()) {
		event->enable();
	}

	for (event = passiveEventChain; event; event = event->getNext()) {
		event->enable();
	}
}

void
IOWorkLoop::disableAllEventSources() const
{
	IOEventSource *event;

	for (event = eventChain; event; event = event->getNext()) {
		event->disable();
	}

	/* NOTE: controlG is in passiveEventChain since it's an IOCommandGate */
	for (event = passiveEventChain; event; event = event->getNext()) {
		if (event != controlG) { // Don't disable the control gate
			event->disable();
		}
	}
}

void
IOWorkLoop::enableAllInterrupts() const
{
	IOEventSource *event;

	for (event = eventChain; event; event = event->getNext()) {
		if (OSDynamicCast(IOInterruptEventSource, event)) {
			event->enable();
		}
	}
}

void
IOWorkLoop::disableAllInterrupts() const
{
	IOEventSource *event;

	for (event = eventChain; event; event = event->getNext()) {
		if (OSDynamicCast(IOInterruptEventSource, event)) {
			event->disable();
		}
	}
}


/* virtual */ bool
IOWorkLoop::runEventSources()
{
	bool res = false;
	bool traceWL = (gIOKitTrace & kIOTraceWorkLoops) ? true : false;
	bool traceES = (gIOKitTrace & kIOTraceEventSources) ? true : false;

	closeGate();
	if (ISSETP(&fFlags, kLoopTerminate)) {
		goto abort;
	}

	if (traceWL) {
		IOTimeStampStartConstant(IODBG_WORKLOOP(IOWL_WORK), VM_KERNEL_ADDRHIDE(this));
	}

	bool more;
	do {
		CLRP(&fFlags, kLoopRestart);
		more = false;
		IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
		workToDo = false;
		IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
		/* NOTE: only loop over event sources in eventChain. Bypass "passive" event sources for performance */
		for (IOEventSource *evnt = eventChain; evnt; evnt = evnt->getNext()) {
			if (traceES) {
				IOTimeStampStartConstant(IODBG_WORKLOOP(IOWL_CLIENT), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(evnt));
			}

			more |= evnt->checkForWork();

			if (traceES) {
				IOTimeStampEndConstant(IODBG_WORKLOOP(IOWL_CLIENT), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(evnt));
			}

			if (ISSETP(&fFlags, kLoopTerminate)) {
				goto abort;
			} else if (fFlags & kLoopRestart) {
				more = true;
				break;
			}
		}
	} while (more);

	res = true;

	if (traceWL) {
		IOTimeStampEndConstant(IODBG_WORKLOOP(IOWL_WORK), VM_KERNEL_ADDRHIDE(this));
	}

abort:
	openGate();
	return res;
}

/* virtual */ void
IOWorkLoop::threadMain()
{
restartThread:
	do {
		if (!runEventSources()) {
			goto exitThread;
		}

		IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
		if (!ISSETP(&fFlags, kLoopTerminate) && !workToDo) {
			assert_wait((void *) &workToDo, false);
			IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
			thread_continue_t cptr = NULL;
			if (!reserved || !(kPreciousStack & reserved->options)) {
				cptr = OSMemberFunctionCast(
					thread_continue_t, this, &IOWorkLoop::threadMain);
			}
			thread_block_parameter(cptr, this);
			goto restartThread;
			/* NOTREACHED */
		}

		// At this point we either have work to do or we need
		// to terminate the thread. Either way,
		// clear the simple lock and restore the interrupt state.
		IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
	} while (workToDo);

exitThread:
	closeGate();
	thread_t thread = workThread;
	workThread = NULL;      // Say we don't have a loop and free ourselves
	openGate();

	free();

	thread_deallocate(thread);
	(void) thread_terminate(thread);
}

IOThread
IOWorkLoop::getThread() const
{
	return workThread;
}

bool
IOWorkLoop::onThread() const
{
	return IOThreadSelf() == workThread;
}

bool
IOWorkLoop::inGate() const
{
	return IORecursiveLockHaveLock(gateLock);
}

// Internal APIs used by event sources to control the thread
void
IOWorkLoop::signalWorkAvailable()
{
	if (workToDoLock) {
		IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
		workToDo = true;
		thread_wakeup_thread((void *) &workToDo, workThread);
		IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
	}
}
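
/*
 * Illustrative sketch (comment only) of how a custom IOEventSource subclass
 * typically drives this: a producer records pending work and calls the
 * protected IOEventSource::signalWorkAvailable(), which lands here; the
 * work-loop thread later calls checkForWork() with the gate closed.
 * MyEventSource and fPendingCount are hypothetical names.
 *
 *	void MyEventSource::producerSideNotify()
 *	{
 *		fPendingCount++;        // caller provides its own synchronization
 *		signalWorkAvailable();  // wakes the work-loop thread
 *	}
 *
 *	bool MyEventSource::checkForWork()
 *	{
 *		// Runs on the work-loop thread with the gate held.
 *		if (!fPendingCount) {
 *			return false;
 *		}
 *		fPendingCount--;
 *		// ... deliver one unit of work to the owner here ...
 *		return fPendingCount != 0;      // true => poll us again this pass
 *	}
 */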

void
IOWorkLoop::openGate()
{
	IOStatisticsOpenGate();
	IORecursiveLockUnlock(gateLock);
}

void
IOWorkLoop::closeGate()
{
	IORecursiveLockLock(gateLock);
	IOStatisticsCloseGate();
}

bool
IOWorkLoop::tryCloseGate()
{
	bool res = (IORecursiveLockTryLock(gateLock) != 0);
	if (res) {
		IOStatisticsCloseGate();
	}
	return res;
}

int
IOWorkLoop::sleepGate(void *event, UInt32 interuptibleType)
{
	int res;
	IOStatisticsOpenGate();
	res = IORecursiveLockSleep(gateLock, event, interuptibleType);
	IOStatisticsCloseGate();
	return res;
}

int
IOWorkLoop::sleepGate(void *event, AbsoluteTime deadline, UInt32 interuptibleType)
{
	int res;
	IOStatisticsOpenGate();
	res = IORecursiveLockSleepDeadline(gateLock, event, deadline, interuptibleType);
	IOStatisticsCloseGate();
	return res;
}

void
IOWorkLoop::wakeupGate(void *event, bool oneThread)
{
	IORecursiveLockWakeup(gateLock, event, oneThread);
}
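
/*
 * Illustrative sketch (comment only): sleepGate()/wakeupGate() behave like a
 * condition variable bound to gateLock and must be called with the gate
 * closed. Drivers normally reach them through IOCommandGate::commandSleep()
 * and commandWakeup(), which forward here. fCommandGate and fDataReady are
 * hypothetical names owned by the caller.
 *
 *	// Inside a command gate action: wait until a producer flags data.
 *	while (!fDataReady) {
 *		// Drops the gate while blocked, reacquires it before returning.
 *		fCommandGate->commandSleep(&fDataReady, THREAD_UNINT);
 *	}
 *
 *	// Producer, also running inside the gate:
 *	fDataReady = true;
 *	fCommandGate->commandWakeup(&fDataReady);       // wake all waiters
 */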

static IOReturn
IOWorkLoopActionToBlock(OSObject *owner,
    void *arg0, void *arg1,
    void *arg2, void *arg3)
{
	return ((IOWorkLoop::ActionBlock) arg0)();
}

IOReturn
IOWorkLoop::runActionBlock(ActionBlock action)
{
	return runAction(&IOWorkLoopActionToBlock, this, action);
}

IOReturn
IOWorkLoop::runAction(Action inAction, OSObject *target,
    void *arg0, void *arg1,
    void *arg2, void *arg3)
{
	IOReturn res;

	// closeGate is recursive so don't worry if we already hold the lock.
	closeGate();
	res = (*inAction)(target, arg0, arg1, arg2, arg3);
	openGate();

	return res;
}
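
/*
 * Illustrative sketch (comment only) of serializing access to driver state
 * with runAction(). The gate is recursive, so this is safe even when the
 * caller is already running on the work loop. com_example_Driver, fState and
 * getStateAction are hypothetical names.
 *
 *	static IOReturn
 *	getStateAction(OSObject *target, void *arg0, void *, void *, void *)
 *	{
 *		*(uint32_t *) arg0 = ((com_example_Driver *) target)->fState;
 *		return kIOReturnSuccess;
 *	}
 *
 *	uint32_t state;
 *	fWorkLoop->runAction(&getStateAction, this, &state);
 *
 * With block support the same read can go through runActionBlock():
 *
 *	__block uint32_t state;
 *	fWorkLoop->runActionBlock(^IOReturn {
 *		state = fState;
 *		return kIOReturnSuccess;
 *	});
 */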

IOReturn
IOWorkLoop::_maintRequest(void *inC, void *inD, void *, void *)
{
	maintCommandEnum command = (maintCommandEnum) (uintptr_t) inC;
	IOEventSource *inEvent = (IOEventSource *) inD;
	IOReturn res = kIOReturnSuccess;

	switch (command) {
	case mAddEvent:
		if (!inEvent->getWorkLoop()) {
			SETP(&fFlags, kLoopRestart);

			inEvent->retain();
			inEvent->setWorkLoop(this);
			inEvent->setNext(NULL);

			/* Check if this is a passive or active event source being added */
			if (eventSourcePerformsWork(inEvent)) {
				if (!eventChain) {
					eventChain = inEvent;
				} else {
					IOEventSource *event, *next;

					for (event = eventChain; (next = event->getNext()); event = next) {
						;
					}
					event->setNext(inEvent);
				}
			} else {
				if (!passiveEventChain) {
					passiveEventChain = inEvent;
				} else {
					IOEventSource *event, *next;

					for (event = passiveEventChain; (next = event->getNext()); event = next) {
						;
					}
					event->setNext(inEvent);
				}
			}
			IOStatisticsAttachEventSource();
		}
		break;

	case mRemoveEvent:
		if (inEvent->getWorkLoop()) {
			IOStatisticsDetachEventSource();

			if (eventSourcePerformsWork(inEvent)) {
				if (eventChain == inEvent) {
					eventChain = inEvent->getNext();
				} else {
					IOEventSource *event, *next = NULL;

					event = eventChain;
					if (event) {
						while ((next = event->getNext()) && (next != inEvent)) {
							event = next;
						}
					}

					if (!next) {
						res = kIOReturnBadArgument;
						break;
					}
					event->setNext(inEvent->getNext());
				}
			} else {
				if (passiveEventChain == inEvent) {
					passiveEventChain = inEvent->getNext();
				} else {
					IOEventSource *event, *next = NULL;

					event = passiveEventChain;
					if (event) {
						while ((next = event->getNext()) && (next != inEvent)) {
							event = next;
						}
					}

					if (!next) {
						res = kIOReturnBadArgument;
						break;
					}
					event->setNext(inEvent->getNext());
				}
			}

			inEvent->setWorkLoop(NULL);
			inEvent->setNext(NULL);
			inEvent->release();
			SETP(&fFlags, kLoopRestart);
		}
		break;

	default:
		return kIOReturnUnsupported;
	}

	return res;
}

bool
IOWorkLoop::eventSourcePerformsWork(IOEventSource *inEventSource)
{
	bool result = true;

	/*
	 * The idea here is to see if the subclass of IOEventSource has overridden checkForWork().
	 * The assumption is that if you override checkForWork(), you need to be
	 * active and not passive.
	 *
	 * We picked a known quantity, controlG, that does not override
	 * IOEventSource::checkForWork(), namely the IOCommandGate associated with
	 * the workloop to which this event source is getting attached.
	 *
	 * We do a pointer comparison on the checkForWork() vtable slot for
	 * inEventSource against the same slot for controlG. This works because
	 * IOCommandGate's slot for checkForWork() has the address of
	 * IOEventSource::checkForWork() in it.
	 *
	 * Think of OSMemberFunctionCast yielding the value at the vtable offset for
	 * checkForWork() here. We're just testing to see if it's the same or not.
	 */

	if (IOEventSource::kPassive & inEventSource->flags) {
		result = false;
	} else if (IOEventSource::kActive & inEventSource->flags) {
		result = true;
	} else if (controlG) {
		void * ptr1;
		void * ptr2;

		ptr1 = OSMemberFunctionCast(void*, inEventSource, &IOEventSource::checkForWork);
		ptr2 = OSMemberFunctionCast(void*, controlG, &IOEventSource::checkForWork);

		if (ptr1 == ptr2) {
			result = false;
		}
	}

	return result;
}
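
/*
 * Illustrative sketch (comment only): a subclass that overrides checkForWork()
 * but never needs to be polled by runEventSources() can opt out of the
 * heuristic above by marking itself passive explicitly. MyGateLikeSource is a
 * hypothetical subclass; kPassive is the flag consumed at the top of
 * eventSourcePerformsWork().
 *
 *	bool MyGateLikeSource::init(OSObject *inOwner)
 *	{
 *		if (!IOEventSource::init(inOwner)) {
 *			return false;
 *		}
 *		flags |= kPassive;      // keep us off the active eventChain
 *		return true;
 *	}
 */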

void
IOWorkLoop::lockTime(void)
{
	uint64_t time;
	time = mach_absolute_time() - reserved->lockTime;
	if (time > reserved->lockInterval) {
		absolutetime_to_nanoseconds(time, &time);
		if (kTimeLockPanics & reserved->options) {
			panic("IOWorkLoop %p lock time %qd us", this, time / 1000ULL);
		} else {
			OSReportWithBacktrace("IOWorkLoop %p lock time %qd us", this, time / 1000ULL);
		}
	}
}

void
IOWorkLoop::setMaximumLockTime(uint64_t interval, uint32_t options)
{
	IORecursiveLockLock(gateLock);
	reserved->lockInterval = interval;
	reserved->options = (reserved->options & ~kTimeLockPanics) | (options & kTimeLockPanics);
	IORecursiveLockUnlock(gateLock);
}
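
/*
 * Illustrative sketch (comment only): lockTime() above compares
 * mach_absolute_time() deltas against lockInterval, so the interval passed to
 * setMaximumLockTime() is in absolute-time units. A caller that wants a panic
 * whenever the gate is held longer than 50 ms might write (wl is a
 * hypothetical IOWorkLoop pointer):
 *
 *	uint64_t interval;
 *	nanoseconds_to_absolutetime(50 * NSEC_PER_MSEC, &interval);
 *	wl->setMaximumLockTime(interval, IOWorkLoop::kTimeLockPanics);
 */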