/*
 * Copyright (c) 1998-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#if !defined(__LP64__)

#include <IOKit/IOCommandQueue.h>
#include <IOKit/IOWorkLoop.h>
#include <IOKit/IOTimeStamp.h>
#include <IOKit/IOKitDebug.h>
#include <libkern/c++/OSSharedPtr.h>

#include <mach/sync_policy.h>

#if IOKITSTATS

#define IOStatisticsInitializeCounter() \
	IOStatistics::setCounterType(reserved->counter, kIOStatisticsCommandQueueCounter)

#define IOStatisticsActionCall() \
	IOStatistics::countCommandQueueActionCall(reserved->counter)

#else

#define IOStatisticsInitializeCounter()
#define IOStatisticsActionCall()

#endif /* IOKITSTATS */

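/*
 * Each queued command is a fixed-size record of NUM_FIELDS_IN_COMMAND
 * opaque pointer fields, stored in a circular buffer of 'size' entries.
 */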
#define NUM_FIELDS_IN_COMMAND   4
typedef struct commandEntryTag {
	void *f[NUM_FIELDS_IN_COMMAND];
} commandEntryT;

#define super IOEventSource

OSDefineMetaClassAndStructors(IOCommandQueue, IOEventSource)

/*
 * initWithNext:owner:action:size:
 *	- initWithNext: (IOEventSource *) inNext
 *	       owner: (id) inOwner
 *	      action: (SEL) inAction
 *		size: (int) inSize;
 *
 * Primary initialiser for the IOCommandQueue class. Returns an
 * IOCommandQueue object that is initialised with the next object in
 * the chain and the owner and action. On return the signalWorkAvailableIMP
 * has been cached for this function.
 *
 * If the object fails to initialise for some reason then [self free] will
 * be called and nil will be returned.
 *
 * See also: initWithNext:owner:action:(IOEventSource)
 */
bool
IOCommandQueue::init(OSObject *inOwner,
    IOCommandQueueAction inAction,
    int inSize)
{
	if (!super::init(inOwner, (IOEventSourceAction) inAction)) {
		return false;
	}

	if (KERN_SUCCESS
	    != semaphore_create(kernel_task, &producerSema, SYNC_POLICY_FIFO, inSize)) {
		return false;
	}

	size = inSize + 1; /* Allocate one more entry than needed */

	queue = (void *)kalloc(size * sizeof(commandEntryT));
	if (!queue) {
		return false;
	}

	producerLock = IOLockAlloc();
	if (!producerLock) {
		return false;
	}

	producerIndex = consumerIndex = 0;

	IOStatisticsInitializeCounter();

	return true;
}

OSSharedPtr<IOCommandQueue>
IOCommandQueue::commandQueue(OSObject *inOwner,
    IOCommandQueueAction inAction,
    int inSize)
{
	OSSharedPtr<IOCommandQueue> me = OSMakeShared<IOCommandQueue>();

	if (me && !me->init(inOwner, inAction, inSize)) {
		return nullptr;
	}

	return me;
}
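
/*
 * Usage sketch (illustrative only; the handler and variable names below
 * are hypothetical and not part of this file). A driver typically creates
 * the queue with commandQueue(), attaches it to its work loop, and then
 * enqueues four-pointer commands from producer threads:
 *
 *	static void myCommandHandler(OSObject *owner, void *f0, void *f1,
 *	    void *f2, void *f3)
 *	{
 *		// Runs on the work-loop thread, one call per queued command.
 *	}
 *
 *	OSSharedPtr<IOCommandQueue> cmdQ =
 *	    IOCommandQueue::commandQueue(driver, myCommandHandler, 8);
 *	if (cmdQ && workLoop->addEventSource(cmdQ.get()) == kIOReturnSuccess) {
 *		cmdQ->enqueueCommand(true, request, nullptr, nullptr, nullptr);
 *	}
 */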

/*
 * free
 *	- free
 *
 * Mandatory free of the object independent of the current retain count.
 */
void
IOCommandQueue::free()
{
	if (queue) {
		kfree(queue, size * sizeof(commandEntryT));
	}
	if (producerSema) {
		semaphore_destroy(kernel_task, producerSema);
	}
	if (producerLock) {
		IOLockFree(producerLock);
	}

	super::free();
}
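
/*
 * checkForWork is invoked by the owning IOWorkLoop. It dispatches at most
 * one queued command per call to the registered action and returns true
 * while further entries remain to be consumed.
 */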

#if NUM_FIELDS_IN_COMMAND != 4
#error IOCommandQueue::checkForWork needs to be updated for new command size
#endif

bool
IOCommandQueue::checkForWork()
{
	void *field0, *field1, *field2, *field3;
	bool trace = (gIOKitTrace & kIOTraceCommandGates) ? true : false;

	if (!enabled || consumerIndex == producerIndex) {
		return false;
	}

	commandEntryT *q = (commandEntryT *) queue;
	int localIndex = consumerIndex;

	field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1];
	field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3];
	semaphore_signal(producerSema);

	if (++consumerIndex >= size) {
		consumerIndex = 0;
	}

	if (trace) {
		IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION),
		    VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));
	}

	IOStatisticsActionCall();
	(*(IOCommandQueueAction) action)(owner, field0, field1, field2, field3);

	if (trace) {
		IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION),
		    VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));
	}

	return consumerIndex != producerIndex;
}

/*
 * enqueueSleep:command:
 *	- (kern_return_t) enqueueSleepRaw: (BOOL) gotoSleep
 *	    field0: (void *) field0 field1: (void *) field1
 *	    field2: (void *) field2 field3: (void *) field3;
 *
 * Key method that enqueues the four input fields onto the command queue
 * and calls signalWorkAvailable to indicate that work is available to the
 * consumer. This routine is safe against multiple threaded producers.
 *
 * A family of convenience functions have been provided to assist with the
 * enqueueing of a method selector and an integer tag. This relies on the
 * IODevice rawCommandOccurred... command to forward on the requests.
 *
 * See also: signalWorkAvailable, checkForWork
 */
#if NUM_FIELDS_IN_COMMAND != 4
#error IOCommandQueue::enqueueCommand needs to be updated
#endif
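
/*
 * The producer semaphore counts free slots in the ring buffer: a producer
 * must obtain a slot (blocking if gotoSleep is true, polling otherwise)
 * before taking producerLock to fill its entry, and the consumer returns
 * the slot with semaphore_signal() once the entry has been read.
 */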

kern_return_t
IOCommandQueue::enqueueCommand(bool gotoSleep,
    void *field0, void *field1,
    void *field2, void *field3)
{
	kern_return_t rtn = KERN_SUCCESS;

	/* Make sure there is room in the queue before doing anything else */
	if (gotoSleep) {
		do {
			rtn = semaphore_wait(producerSema);
		} while ((KERN_SUCCESS != rtn)
		    && (KERN_OPERATION_TIMED_OUT != rtn)
		    && (KERN_SEMAPHORE_DESTROYED != rtn)
		    && (KERN_TERMINATED != rtn));
	} else {
		rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO);
	}

	if (KERN_SUCCESS != rtn) {
		return rtn;
	}

	/* Block other producers */
	IOTakeLock(producerLock);

	/*
	 * Make sure that we update the current producer entry before we
	 * increment the producer pointer. This avoids a nasty race as the
	 * test for work is producerIndex != consumerIndex and a signal.
	 */
	commandEntryT *q = (commandEntryT *) queue;
	int localIndex = producerIndex;

	q[localIndex].f[0] = field0; q[localIndex].f[1] = field1;
	q[localIndex].f[2] = field2; q[localIndex].f[3] = field3;

	if (++producerIndex >= size) {
		producerIndex = 0;
	}

	/* Clear to allow other producers to go now */
	IOUnlock(producerLock);

	/*
	 * Right we have created some new work, we had better make sure that
	 * we notify the work loop that it has to test producerIndex.
	 */
	signalWorkAvailable();

	return rtn;
}
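
/*
 * performAndFlush drains every command still in the queue, invoking
 * inAction (or the queue's default action) on each entry, and then
 * refills the producer semaphore so normal operation can resume.
 */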

int
IOCommandQueue::performAndFlush(OSObject *target,
    IOCommandQueueAction inAction)
{
	int numEntries;
	kern_return_t rtn;

	// Set the defaults if necessary
	if (!target) {
		target = owner;
	}
	if (!inAction) {
		inAction = (IOCommandQueueAction) action;
	}

	// Lock out the producers first
	do {
		rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO);
	} while (rtn == KERN_SUCCESS);

	// now step over all remaining entries in the command queue
	for (numEntries = 0; consumerIndex != producerIndex;) {
		void *field0, *field1, *field2, *field3;

		commandEntryT *q = (commandEntryT *) queue;
		int localIndex = consumerIndex;

		field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1];
		field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3];

		if (++consumerIndex >= size) {
			consumerIndex = 0;
		}

		(*inAction)(target, field0, field1, field2, field3);
		numEntries++; /* count each flushed entry for the return value */
	}

	// finally refill the producer semaphore to size - 1
	for (int i = 1; i < size; i++) {
		semaphore_signal(producerSema);
	}

	return numEntries;
}

#endif /* !defined(__LP64__) */