/*
 * Copyright (c) 1998-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if !defined(__LP64__)

#include <IOKit/IOCommandQueue.h>
#include <IOKit/IOWorkLoop.h>
#include <IOKit/IOTimeStamp.h>
#include <IOKit/IOKitDebug.h>

#include <mach/sync_policy.h>

#if IOKITSTATS

#define IOStatisticsInitializeCounter() \
    IOStatistics::setCounterType(reserved->counter, kIOStatisticsCommandQueueCounter)

#define IOStatisticsActionCall() \
    IOStatistics::countCommandQueueActionCall(reserved->counter)

#else

#define IOStatisticsInitializeCounter()
#define IOStatisticsActionCall()

#endif /* IOKITSTATS */

#define NUM_FIELDS_IN_COMMAND 4
typedef struct commandEntryTag {
    void *f[NUM_FIELDS_IN_COMMAND];
} commandEntryT;

#define super IOEventSource

OSDefineMetaClassAndStructors(IOCommandQueue, IOEventSource)

/*[
Instance Methods

init(inOwner, inAction, inSize)
    - bool init(OSObject *inOwner,
                IOCommandQueueAction inAction,
                int inSize);

Primary initialiser for the IOCommandQueue class.  Initialises the event
source with the owner and action, creates the producer semaphore with
inSize slots, and allocates a ring buffer of inSize + 1 command entries
together with the producer lock.

If the object fails to initialise for some reason, false is returned; the
commandQueue() factory method then calls free() and returns zero.  A usage
sketch follows commandQueue() below.

See also: commandQueue(), IOEventSource::init()
]*/
bool IOCommandQueue::init(OSObject *inOwner,
                          IOCommandQueueAction inAction,
                          int inSize)
{
    if ( !super::init(inOwner, (IOEventSourceAction) inAction) )
        return false;

    if (KERN_SUCCESS
        != semaphore_create(kernel_task, &producerSema, SYNC_POLICY_FIFO, inSize))
        return false;

    size = inSize + 1; /* Allocate one more entry than needed */

    queue = (void *)kalloc(size * sizeof(commandEntryT));
    if (!queue)
        return false;

    producerLock = IOLockAlloc();
    if (!producerLock)
        return false;

    producerIndex = consumerIndex = 0;

    IOStatisticsInitializeCounter();

    return true;
}

IOCommandQueue *
IOCommandQueue::commandQueue(OSObject *inOwner,
                             IOCommandQueueAction inAction,
                             int inSize)
{
    IOCommandQueue *me = new IOCommandQueue;

    if (me && !me->init(inOwner, inAction, inSize)) {
        me->free();
        return 0;
    }

    return me;
}
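
/*
 * Illustrative sketch: one way a client driver might create a command queue
 * and attach it to its work loop.  The action routine, queue depth and helper
 * names here are hypothetical and not part of this file.
 */
#if 0
static void exampleCommandAction(OSObject *owner, void *field0, void *field1,
                                 void *field2, void *field3)
{
    /* Runs on the work-loop thread once for each dequeued command. */
}

static IOCommandQueue *exampleCreateQueue(OSObject *driver, IOWorkLoop *workLoop)
{
    IOCommandQueue *cmdQ =
        IOCommandQueue::commandQueue(driver, &exampleCommandAction, 8);

    /* The queue only delivers commands once it is added to a work loop. */
    if (cmdQ && workLoop->addEventSource(cmdQ) != kIOReturnSuccess) {
        cmdQ->release();
        cmdQ = 0;
    }
    return cmdQ;
}
#endif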

/*[
free
    - void free();

Mandatory free of the object independent of the current retain count.
Releases the command ring buffer, the producer semaphore and the producer
lock before calling the superclass free().
]*/
void IOCommandQueue::free()
{
    if (queue)
        kfree(queue, size * sizeof(commandEntryT));
    if (producerSema)
        semaphore_destroy(kernel_task, producerSema);
    if (producerLock)
        IOLockFree(producerLock);

    super::free();
}

#if NUM_FIELDS_IN_COMMAND != 4
#error IOCommandQueue::checkForWork needs to be updated for new command size
#endif

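/*[
checkForWork

Consumer side of the queue, called by the work loop.  Dequeues one command,
signals the producer semaphore to free the slot, and invokes the action with
the four stored fields.  Returns true while further commands remain so that
the work loop calls back again.
]*/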
bool IOCommandQueue::checkForWork()
{
    void *field0, *field1, *field2, *field3;
    bool trace = ( gIOKitTrace & kIOTraceCommandGates ) ? true : false;

    if (!enabled || consumerIndex == producerIndex)
        return false;

    {
        commandEntryT *q = (commandEntryT *) queue;
        int localIndex = consumerIndex;

        field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1];
        field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3];
        semaphore_signal(producerSema);
    }

    if (++consumerIndex >= size)
        consumerIndex = 0;

    if (trace)
        IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION),
                                 VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));

    IOStatisticsActionCall();
    (*(IOCommandQueueAction) action)(owner, field0, field1, field2, field3);

    if (trace)
        IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION),
                               VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));

    return (consumerIndex != producerIndex);
}

/*[
enqueueCommand(gotoSleep, field0, field1, field2, field3)
    - kern_return_t enqueueCommand(bool gotoSleep,
                                   void *field0, void *field1,
                                   void *field2, void *field3);

Key method that enqueues the four input fields onto the command queue
and calls signalWorkAvailable to indicate that work is available to the
consumer.  This routine is safe against multiple producer threads.  If
gotoSleep is true the caller blocks until a queue slot is free; otherwise
the call fails immediately when the queue is full.  A usage sketch follows
the method below.

A family of convenience functions was provided to assist with enqueueing
a method selector and an integer tag; these rely on the IODevice
rawCommandOccurred... command to forward the requests.

See also: signalWorkAvailable, checkForWork
]*/
#if NUM_FIELDS_IN_COMMAND != 4
#error IOCommandQueue::enqueueCommand needs to be updated
#endif

kern_return_t
IOCommandQueue::enqueueCommand(bool gotoSleep,
                               void *field0, void *field1,
                               void *field2, void *field3)
{
    kern_return_t rtn = KERN_SUCCESS;
    int retry;

    /* Make sure there is room in the queue before doing anything else */

    if (gotoSleep) {
        retry = 0;
        do
            rtn = semaphore_wait(producerSema);
        while ( (KERN_SUCCESS != rtn)
             && (KERN_OPERATION_TIMED_OUT != rtn)
             && (KERN_SEMAPHORE_DESTROYED != rtn)
             && (KERN_TERMINATED != rtn)
             && ((retry++) < 4));
    } else
        rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO);

    if (KERN_SUCCESS != rtn)
        return rtn;

    /* Block other producers */
    IOTakeLock(producerLock);

    /*
     * Make sure that we update the current producer entry before we
     * increment the producer pointer.  This avoids a nasty race: the
     * consumer's test for work is simply producerIndex != consumerIndex,
     * paired with the work-available signal.
     */
    {
        commandEntryT *q = (commandEntryT *) queue;
        int localIndex = producerIndex;

        q[localIndex].f[0] = field0; q[localIndex].f[1] = field1;
        q[localIndex].f[2] = field2; q[localIndex].f[3] = field3;
    }
    if (++producerIndex >= size)
        producerIndex = 0;

    /* Clear to allow other producers to go now */
    IOUnlock(producerLock);

    /*
     * Right, we have created some new work; we had better make sure that
     * we notify the work loop that it has to test producerIndex.
     */
    signalWorkAvailable();
    return rtn;
}
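
/*
 * Illustrative sketch: a producer thread posting work to the queue.  The
 * request pointer and the choice to sleep when the queue is full are
 * hypothetical.
 */
#if 0
static kern_return_t examplePostWork(IOCommandQueue *cmdQ, void *request)
{
    /* Block (gotoSleep == true) until a slot is free, then enqueue the
     * request; the work loop later passes these four fields to the action. */
    return cmdQ->enqueueCommand(true, request, 0, 0, 0);
}
#endif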

int IOCommandQueue::performAndFlush(OSObject *target,
                                    IOCommandQueueAction inAction)
{
    int numEntries;
    kern_return_t rtn;

    // Set the defaults if necessary
    if (!target)
        target = owner;
    if (!inAction)
        inAction = (IOCommandQueueAction) action;

    // Lock out the producers first
    do {
        rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO);
    } while (rtn == KERN_SUCCESS);

    // now step over (and count) all remaining entries in the command queue
    for (numEntries = 0; consumerIndex != producerIndex; numEntries++) {
        void *field0, *field1, *field2, *field3;

        {
            commandEntryT *q = (commandEntryT *) queue;
            int localIndex = consumerIndex;

            field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1];
            field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3];
        }

        if (++consumerIndex >= size)
            consumerIndex = 0;

        (*inAction)(target, field0, field1, field2, field3);
    }

    // finally refill the producer semaphore to size - 1
    for (int i = 1; i < size; i++)
        semaphore_signal(producerSema);

    return numEntries;
}
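
/*
 * Illustrative sketch: draining the queue during driver teardown.  The driver
 * object and the reuse of exampleCommandAction from the sketch above are
 * hypothetical.
 */
#if 0
static void exampleDrainQueue(IOCommandQueue *cmdQ, OSObject *driver)
{
    /* Runs every command still in the ring buffer through the supplied
     * action and reports how many entries were flushed. */
    int flushed = cmdQ->performAndFlush(driver, &exampleCommandAction);
    (void) flushed;
}
#endif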

#endif /* !defined(__LP64__) */