/*
 * Copyright (c) 1998-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <pexpert/pexpert.h>
#include <IOKit/IOWorkLoop.h>
#include <IOKit/IOEventSource.h>
#include <IOKit/IOInterruptEventSource.h>
#include <IOKit/IOCommandGate.h>
#include <IOKit/IOCommandPool.h>
#include <IOKit/IOTimeStamp.h>
#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <kern/thread.h>
#define super OSObject

OSDefineMetaClassAndStructors(IOWorkLoop, OSObject);
// Block of unused functions intended for future use
#if __LP64__
OSMetaClassDefineReservedUnused(IOWorkLoop, 0);
OSMetaClassDefineReservedUnused(IOWorkLoop, 1);
OSMetaClassDefineReservedUnused(IOWorkLoop, 2);
#else
OSMetaClassDefineReservedUsedX86(IOWorkLoop, 0);
OSMetaClassDefineReservedUsedX86(IOWorkLoop, 1);
OSMetaClassDefineReservedUsedX86(IOWorkLoop, 2);
#endif
OSMetaClassDefineReservedUnused(IOWorkLoop, 3);
OSMetaClassDefineReservedUnused(IOWorkLoop, 4);
OSMetaClassDefineReservedUnused(IOWorkLoop, 5);
OSMetaClassDefineReservedUnused(IOWorkLoop, 6);
OSMetaClassDefineReservedUnused(IOWorkLoop, 7);
enum IOWorkLoopState { kLoopRestart = 0x1, kLoopTerminate = 0x2 };
static inline void
SETP(void *addr, unsigned int flag)
{
    unsigned char *num = (unsigned char *) addr;
    *num |= flag;
}

static inline void
CLRP(void *addr, unsigned int flag)
{
    unsigned char *num = (unsigned char *) addr;
    *num &= ~flag;
}

static inline bool
ISSETP(void *addr, unsigned int flag)
{
    unsigned char *num = (unsigned char *) addr;
    return (*num & flag) != 0;
}
#define fFlags loopRestart

#define passiveEventChain reserved->passiveEventChain
#if IOKITSTATS
#define IOStatisticsRegisterCounter() \
do { \
    reserved->counter = IOStatistics::registerWorkLoop(this); \
} while (0)

#define IOStatisticsUnregisterCounter() \
do { \
    IOStatistics::unregisterWorkLoop(reserved->counter); \
} while (0)

#define IOStatisticsOpenGate() \
do { \
    IOStatistics::countWorkLoopOpenGate(reserved->counter); \
    if (reserved->lockInterval) lockTime(); \
} while (0)

#define IOStatisticsCloseGate() \
do { \
    IOStatistics::countWorkLoopCloseGate(reserved->counter); \
    if (reserved->lockInterval) reserved->lockTime = mach_absolute_time(); \
} while (0)

#define IOStatisticsAttachEventSource() \
do { \
    IOStatistics::attachWorkLoopEventSource(reserved->counter, inEvent->reserved->counter); \
} while (0)

#define IOStatisticsDetachEventSource() \
do { \
    IOStatistics::detachWorkLoopEventSource(reserved->counter, inEvent->reserved->counter); \
} while (0)

#else
#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsOpenGate()
#define IOStatisticsCloseGate()
#define IOStatisticsAttachEventSource()
#define IOStatisticsDetachEventSource()
#endif /* IOKITSTATS */
bool
IOWorkLoop::init()
{
    // The super init and gateLock allocation MUST be done first.
    if (!super::init()) {
        return false;
    }

    // Allocate our ExpansionData if it hasn't been allocated already.
    if (!reserved) {
        reserved = IONew(ExpansionData, 1);
        if (!reserved) {
            return false;
        }

        bzero(reserved, sizeof(ExpansionData));
    }

    if (gateLock == NULL) {
        if (!(gateLock = IORecursiveLockAlloc())) {
            return false;
        }
    }

    if (workToDoLock == NULL) {
        if (!(workToDoLock = IOSimpleLockAlloc())) {
            return false;
        }
        IOSimpleLockInit(workToDoLock);
        workToDo = false;
    }

    IOStatisticsRegisterCounter();

    if (controlG == NULL) {
        controlG = IOCommandGate::commandGate(
            this,
            OSMemberFunctionCast(
                IOCommandGate::Action,
                this,
                &IOWorkLoop::_maintRequest));

        if (controlG == NULL) {
            return false;
        }

        // Point the controlGate at the workLoop. Usually addEventSource
        // does this automatically. The problem is in this case addEventSource
        // uses the control gate and it has to be bootstrapped.
        controlG->setWorkLoop(this);
        if (addEventSource(controlG) != kIOReturnSuccess) {
            return false;
        }
    }

    if (workThread == NULL) {
        thread_continue_t cptr = OSMemberFunctionCast(
            thread_continue_t,
            this,
            &IOWorkLoop::threadMain);
        if (KERN_SUCCESS != kernel_thread_start(cptr, this, &workThread)) {
            return false;
        }
    }

    (void) thread_set_tag(workThread, THREAD_TAG_IOWORKLOOP);

    return true;
}
IOWorkLoop *
IOWorkLoop::workLoop()
{
    return IOWorkLoop::workLoopWithOptions(0);
}

IOWorkLoop *
IOWorkLoop::workLoopWithOptions(IOOptionBits options)
{
    IOWorkLoop *me = new IOWorkLoop;

    if (me && options) {
        me->reserved = IONew(ExpansionData, 1);
        if (!me->reserved) {
            me->release();
            return NULL;
        }
        bzero(me->reserved, sizeof(ExpansionData));
        me->reserved->options = options;
    }

    if (me && !me->init()) {
        me->release();
        return NULL;
    }

    return me;
}
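/*
 * Illustrative sketch (not part of this file): typical driver-side use of the
 * factory above. The event source member name is hypothetical; the
 * IOWorkLoop/IOEventSource calls themselves are real API.
 *
 *   IOWorkLoop *wl = IOWorkLoop::workLoop();            // returned retained; caller must release
 *   if (wl == NULL) {
 *       return false;
 *   }
 *   if (wl->addEventSource(fTimerEventSource) != kIOReturnSuccess) {   // fTimerEventSource: hypothetical member
 *       wl->release();
 *       return false;
 *   }
 *   // ... later, during teardown:
 *   wl->removeEventSource(fTimerEventSource);
 *   wl->release();
 */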
void
IOWorkLoop::releaseEventChain(LIBKERN_CONSUMED IOEventSource *eventChain)
{
    IOEventSource *event, *next;

    for (event = eventChain; event; event = next) {
        next = event->getNext();
#ifdef __clang_analyzer__
        // Unlike the usual IOKit memory management convention, IOWorkLoop
        // manages the retain count for the IOEventSource instances in the
        // chain rather than have IOEventSource do that itself. This means
        // it is safe to call release() on the result of getNext() while the
        // chain is being torn down. However, the analyzer doesn't
        // realize this. We add an extra retain under analysis to suppress
        // an analyzer diagnostic about violations of the memory management rules.
        if (next) {
            next->retain();
        }
#endif
        event->setWorkLoop(NULL);
        event->setNext(NULL);
        event->release();
    }
}
// Free is called twice:
// First when the atomic retainCount transitions from 1 -> 0.
// Second when the work loop itself is committing hari-kari.
// Hence each leg of the free must be single threaded.
void
IOWorkLoop::free()
{
    if (workThread) {
        IOInterruptState is;

        // If we are here then we must be trying to shut down this work loop.
        // In this case disable all of the event sources, mark the loop
        // as terminating, wake up the work thread itself, and return.
        // Note: we hold the gate across the entire operation mainly for the
        // benefit of our event sources so we can disable them cleanly.
        closeGate();

        disableAllEventSources();

        is = IOSimpleLockLockDisableInterrupt(workToDoLock);
        SETP(&fFlags, kLoopTerminate);
        thread_wakeup_thread((void *) &workToDo, workThread);
        IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);

        openGate();
    } else { /* !workThread */
        releaseEventChain(eventChain);
        eventChain = NULL;

        releaseEventChain(passiveEventChain);
        passiveEventChain = NULL;

        // Either we have a partial initialization to clean up
        // or the workThread itself is performing hari-kari.
        // Either way clean up all of our resources and return.

        if (controlG) {
            controlG->workLoop = NULL;
            controlG->release();
            controlG = NULL;
        }

        if (workToDoLock) {
            IOSimpleLockFree(workToDoLock);
            workToDoLock = NULL;
        }

        if (gateLock) {
            IORecursiveLockFree(gateLock);
            gateLock = NULL;
        }

        IOStatisticsUnregisterCounter();

        if (reserved) {
            IODelete(reserved, ExpansionData, 1);
            reserved = NULL;
        }

        super::free();
    }
}
IOReturn
IOWorkLoop::addEventSource(IOEventSource *newEvent)
{
    if ((workThread)
        && !thread_has_thread_name(workThread)
        && (newEvent->owner)
        && !OSDynamicCast(IOCommandPool, newEvent->owner)) {
        thread_set_thread_name(workThread, newEvent->owner->getMetaClass()->getClassName());
    }

    return controlG->runCommand((void *) mAddEvent, (void *) newEvent);
}
IOReturn
IOWorkLoop::removeEventSource(IOEventSource *toRemove)
{
    return controlG->runCommand((void *) mRemoveEvent, (void *) toRemove);
}
void
IOWorkLoop::enableAllEventSources() const
{
    IOEventSource *event;

    for (event = eventChain; event; event = event->getNext()) {
        event->enable();
    }

    for (event = passiveEventChain; event; event = event->getNext()) {
        event->enable();
    }
}
void
IOWorkLoop::disableAllEventSources() const
{
    IOEventSource *event;

    for (event = eventChain; event; event = event->getNext()) {
        event->disable();
    }

    /* NOTE: controlG is in passiveEventChain since it's an IOCommandGate */
    for (event = passiveEventChain; event; event = event->getNext()) {
        if (event != controlG) { // Don't disable the control gate
            event->disable();
        }
    }
}
void
IOWorkLoop::enableAllInterrupts() const
{
    IOEventSource *event;

    for (event = eventChain; event; event = event->getNext()) {
        if (OSDynamicCast(IOInterruptEventSource, event)) {
            event->enable();
        }
    }
}
void
IOWorkLoop::disableAllInterrupts() const
{
    IOEventSource *event;

    for (event = eventChain; event; event = event->getNext()) {
        if (OSDynamicCast(IOInterruptEventSource, event)) {
            event->disable();
        }
    }
}
/* virtual */ bool
IOWorkLoop::runEventSources()
{
    bool res = false;
    bool traceWL = (gIOKitTrace & kIOTraceWorkLoops) ? true : false;
    bool traceES = (gIOKitTrace & kIOTraceEventSources) ? true : false;

    closeGate();
    if (ISSETP(&fFlags, kLoopTerminate)) {
        goto abort;
    }

    if (traceWL) {
        IOTimeStampStartConstant(IODBG_WORKLOOP(IOWL_WORK), VM_KERNEL_ADDRHIDE(this));
    }

    bool more;
    do {
        CLRP(&fFlags, kLoopRestart);
        more = false;
        IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
        workToDo = false;
        IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
        /* NOTE: only loop over event sources in eventChain. Bypass "passive" event sources for performance */
        for (IOEventSource *evnt = eventChain; evnt; evnt = evnt->getNext()) {
            if (traceES) {
                IOTimeStampStartConstant(IODBG_WORKLOOP(IOWL_CLIENT), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(evnt));
            }

            more |= evnt->checkForWork();

            if (traceES) {
                IOTimeStampEndConstant(IODBG_WORKLOOP(IOWL_CLIENT), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(evnt));
            }

            if (ISSETP(&fFlags, kLoopTerminate)) {
                goto abort;
            } else if (fFlags & kLoopRestart) {
                more = true;
                break;
            }
        }
    } while (more);

    res = true;

    if (traceWL) {
        IOTimeStampEndConstant(IODBG_WORKLOOP(IOWL_WORK), VM_KERNEL_ADDRHIDE(this));
    }

abort:
    openGate();
    return res;
}
/* virtual */ void
IOWorkLoop::threadMain()
{
restartThread:
    do {
        if (!runEventSources()) {
            goto exitThread;
        }

        IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
        if (!ISSETP(&fFlags, kLoopTerminate) && !workToDo) {
            assert_wait((void *) &workToDo, false);
            IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
            thread_continue_t cptr = NULL;
            if (!reserved || !(kPreciousStack & reserved->options)) {
                cptr = OSMemberFunctionCast(
                    thread_continue_t, this, &IOWorkLoop::threadMain);
            }
            thread_block_parameter(cptr, this);
            goto restartThread;
            /* NOTREACHED */
        }

        // At this point we either have work to do or we need
        // to commit suicide. But no matter what,
        // clear the simple lock and restore the interrupt state.
        IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
    } while (workToDo);

exitThread:
    closeGate();
    thread_t thread = workThread;
    workThread = NULL; // Say we don't have a loop and free ourselves
    openGate();

    free();

    thread_deallocate(thread);
    (void) thread_terminate(thread);
}
thread_t
IOWorkLoop::getThread() const
{
    return workThread;
}

bool
IOWorkLoop::onThread() const
{
    return IOThreadSelf() == workThread;
}

bool
IOWorkLoop::inGate() const
{
    return IORecursiveLockHaveLock(gateLock);
}
// Internal APIs used by event sources to control the thread.
void
IOWorkLoop::signalWorkAvailable()
{
    if (workToDoLock) {
        IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
        workToDo = true;
        thread_wakeup_thread((void *) &workToDo, workThread);
        IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
    }
}
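/*
 * Illustrative sketch (assumption, not from this file): a custom IOEventSource
 * subclass typically latches some state and then signals the work loop so that
 * threadMain() wakes up and calls checkForWork(). The class and member names
 * below are hypothetical; IOEventSource::signalWorkAvailable() is the real helper
 * that forwards to the method above.
 *
 *   void MyEventSource::dataArrived()
 *   {
 *       fDataReady = true;        // hypothetical flag later examined in checkForWork()
 *       signalWorkAvailable();    // wakes the work-loop thread
 *   }
 */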
void
IOWorkLoop::openGate()
{
    IOStatisticsOpenGate();
    IORecursiveLockUnlock(gateLock);
}

void
IOWorkLoop::closeGate()
{
    IORecursiveLockLock(gateLock);
    IOStatisticsCloseGate();
}
bool
IOWorkLoop::tryCloseGate()
{
    bool res = (IORecursiveLockTryLock(gateLock) != 0);
    if (res) {
        IOStatisticsCloseGate();
    }
    return res;
}
int
IOWorkLoop::sleepGate(void *event, UInt32 interruptibleType)
{
    int res;

    IOStatisticsOpenGate();
    res = IORecursiveLockSleep(gateLock, event, interruptibleType);
    IOStatisticsCloseGate();

    return res;
}
int
IOWorkLoop::sleepGate(void *event, AbsoluteTime deadline, UInt32 interruptibleType)
{
    int res;

    IOStatisticsOpenGate();
    res = IORecursiveLockSleepDeadline(gateLock, event, deadline, interruptibleType);
    IOStatisticsCloseGate();

    return res;
}
void
IOWorkLoop::wakeupGate(void *event, bool oneThread)
{
    IORecursiveLockWakeup(gateLock, event, oneThread);
}
static IOReturn
IOWorkLoopActionToBlock(OSObject *owner,
    void *arg0, void *arg1,
    void *arg2, void *arg3)
{
    return ((IOWorkLoop::ActionBlock) arg0)();
}

IOReturn
IOWorkLoop::runActionBlock(ActionBlock action)
{
    return runAction(&IOWorkLoopActionToBlock, this, action);
}
IOReturn
IOWorkLoop::runAction(Action inAction, OSObject *target,
    void *arg0, void *arg1,
    void *arg2, void *arg3)
{
    IOReturn res;

    // closeGate is recursive so don't worry if we already hold the lock.
    closeGate();
    res = (*inAction)(target, arg0, arg1, arg2, arg3);
    openGate();

    return res;
}
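/*
 * Illustrative sketch (not part of this file): how a driver commonly serializes
 * work through runAction(). MyDriver, doWorkGated, fWorkLoop, and request are
 * hypothetical names; OSMemberFunctionCast and IOWorkLoop::Action are real API.
 *
 *   IOReturn ret = fWorkLoop->runAction(
 *       OSMemberFunctionCast(IOWorkLoop::Action, this, &MyDriver::doWorkGated),
 *       this,                 // target; becomes the implicit 'this' of doWorkGated
 *       (void *) request);    // arg0
 *
 *   IOReturn MyDriver::doWorkGated(void *arg0, void *, void *, void *)
 *   {
 *       // Runs with the work-loop gate closed, serialized against the event sources.
 *       return kIOReturnSuccess;
 *   }
 */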
IOReturn
IOWorkLoop::_maintRequest(void *inC, void *inD, void *, void *)
{
    maintCommandEnum command = (maintCommandEnum) (uintptr_t) inC;
    IOEventSource *inEvent = (IOEventSource *) inD;
    IOReturn res = kIOReturnSuccess;

    switch (command) {
    case mAddEvent:
        if (!inEvent->getWorkLoop()) {
            SETP(&fFlags, kLoopRestart);

            inEvent->retain();
            inEvent->setWorkLoop(this);
            inEvent->setNext(NULL);

            /* Check if this is a passive or active event source being added */
            if (eventSourcePerformsWork(inEvent)) {
                if (!eventChain) {
                    eventChain = inEvent;
                } else {
                    IOEventSource *event, *next;

                    for (event = eventChain; (next = event->getNext()); event = next) {
                        ;
                    }
                    event->setNext(inEvent);
                }
            } else {
                if (!passiveEventChain) {
                    passiveEventChain = inEvent;
                } else {
                    IOEventSource *event, *next;

                    for (event = passiveEventChain; (next = event->getNext()); event = next) {
                        ;
                    }
                    event->setNext(inEvent);
                }
            }
            IOStatisticsAttachEventSource();
        }
        break;

    case mRemoveEvent:
        if (inEvent->getWorkLoop()) {
            IOStatisticsDetachEventSource();

            if (eventSourcePerformsWork(inEvent)) {
                if (eventChain == inEvent) {
                    eventChain = inEvent->getNext();
                } else {
                    IOEventSource *event, *next = NULL;

                    event = eventChain;
                    while ((next = event->getNext()) && (next != inEvent)) {
                        event = next;
                    }

                    if (!next) {
                        res = kIOReturnBadArgument;
                        break;
                    }
                    event->setNext(inEvent->getNext());
                }
            } else {
                if (passiveEventChain == inEvent) {
                    passiveEventChain = inEvent->getNext();
                } else {
                    IOEventSource *event, *next = NULL;

                    event = passiveEventChain;
                    while ((next = event->getNext()) && (next != inEvent)) {
                        event = next;
                    }

                    if (!next) {
                        res = kIOReturnBadArgument;
                        break;
                    }
                    event->setNext(inEvent->getNext());
                }
            }

            inEvent->setWorkLoop(NULL);
            inEvent->setNext(NULL);
            inEvent->release();
            SETP(&fFlags, kLoopRestart);
        }
        break;

    default:
        return kIOReturnUnsupported;
    }

    return res;
}
bool
IOWorkLoop::eventSourcePerformsWork(IOEventSource *inEventSource)
{
    bool result = true;

    /*
     * The idea here is to see if the subclass of IOEventSource has overridden checkForWork().
     * The assumption is that if you override checkForWork(), you need to be
     * active and not passive.
     *
     * We picked a known quantity, controlG, that does not override
     * IOEventSource::checkForWork(), namely the IOCommandGate associated with
     * the workloop to which this event source is getting attached.
     *
     * We do a pointer comparison on the offset in the vtable for inNewEvent against
     * the offset in the vtable for inReferenceEvent. This works because
     * IOCommandGate's slot for checkForWork() has the address of
     * IOEventSource::checkForWork() in it.
     *
     * Think of OSMemberFunctionCast yielding the value at the vtable offset for
     * checkForWork() here. We're just testing to see if it's the same or not.
     */
    if (IOEventSource::kPassive & inEventSource->flags) {
        result = false;
    } else if (IOEventSource::kActive & inEventSource->flags) {
        result = true;
    } else if (controlG) {
        void *ptr1;
        void *ptr2;

        ptr1 = OSMemberFunctionCast(void *, inEventSource, &IOEventSource::checkForWork);
        ptr2 = OSMemberFunctionCast(void *, controlG, &IOEventSource::checkForWork);

        if (ptr1 == ptr2) {
            result = false;
        }
    }

    return result;
}
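/*
 * Illustrative sketch (assumption, not from this file) of the override test
 * described in the comment above. PassiveSource and ActiveSource are
 * hypothetical IOEventSource subclasses; OSMemberFunctionCast and
 * checkForWork() are real.
 *
 *   class PassiveSource : public IOEventSource { ... };   // does not override checkForWork()
 *   class ActiveSource  : public IOEventSource {
 *       virtual bool checkForWork() APPLE_KEXT_OVERRIDE;  // overrides it
 *   };
 *
 *   // For a PassiveSource instance, OSMemberFunctionCast(void *, src, &IOEventSource::checkForWork)
 *   // yields the same pointer as it does for controlG (both resolve to
 *   // IOEventSource::checkForWork), so the source is treated as passive. For an
 *   // ActiveSource instance the virtual lookup resolves to ActiveSource::checkForWork,
 *   // the pointers differ, and the source stays on the active eventChain.
 */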
void
IOWorkLoop::lockTime(void)
{
    uint64_t time;

    time = mach_absolute_time() - reserved->lockTime;
    if (time > reserved->lockInterval) {
        absolutetime_to_nanoseconds(time, &time);
        if (kTimeLockPanics & reserved->options) {
            panic("IOWorkLoop %p lock time %qd us", this, time / 1000ULL);
        } else {
            OSReportWithBacktrace("IOWorkLoop %p lock time %qd us", this, time / 1000ULL);
        }
    }
}
void
IOWorkLoop::setMaximumLockTime(uint64_t interval, uint32_t options)
{
    IORecursiveLockLock(gateLock);
    reserved->lockInterval = interval;
    reserved->options = (reserved->options & ~kTimeLockPanics) | (options & kTimeLockPanics);
    IORecursiveLockUnlock(gateLock);
}
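/*
 * Illustrative sketch (assumption, not from this file): arming the lock-time
 * watchdog above so that holding the work-loop gate for longer than roughly
 * 10 ms is reported (or panics, when kTimeLockPanics is passed). The interval
 * is in mach absolute-time units, matching the mach_absolute_time() comparison
 * in lockTime(); the surrounding driver code and fWorkLoop are hypothetical.
 *
 *   uint64_t maxHold;
 *   nanoseconds_to_absolutetime(10 * NSEC_PER_MSEC, &maxHold);
 *   fWorkLoop->setMaximumLockTime(maxHold, kTimeLockPanics);
 */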