/*
 * Copyright (c) 1998-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if !defined(__LP64__)

#include <IOKit/IOCommandQueue.h>
#include <IOKit/IOWorkLoop.h>
#include <IOKit/IOTimeStamp.h>
#include <IOKit/IOKitDebug.h>

#include <mach/sync_policy.h>

#if IOKITSTATS

#define IOStatisticsInitializeCounter() \
    IOStatistics::setCounterType(reserved->counter, kIOStatisticsCommandQueueCounter)

#define IOStatisticsActionCall() \
    IOStatistics::countCommandQueueActionCall(reserved->counter)

#else

#define IOStatisticsInitializeCounter()
#define IOStatisticsActionCall()

#endif /* IOKITSTATS */

#define NUM_FIELDS_IN_COMMAND 4
typedef struct commandEntryTag {
    void *f[NUM_FIELDS_IN_COMMAND];
} commandEntryT;

#define super IOEventSource

OSDefineMetaClassAndStructors(IOCommandQueue, IOEventSource)
/*[
 * Instance Methods
 *
 * init(owner, action, size)
 * - bool init(OSObject *inOwner,
 *             IOCommandQueueAction inAction,
 *             int inSize)
 *
 * Primary initialiser for the IOCommandQueue class. Initialises the
 * event source with its owner and action, creates the producer semaphore
 * that limits the queue to inSize outstanding commands, and allocates the
 * circular command buffer and the producer lock.
 *
 * If any step fails, false is returned and the caller is expected to free
 * the partially constructed object, as the commandQueue() factory does.
 *
 * A usage sketch follows the commandQueue() factory below.
 *
 * See also: commandQueue(owner, action, size)
 * ]*/
bool
IOCommandQueue::init(OSObject *inOwner,
    IOCommandQueueAction inAction,
    int inSize)
{
    if (!super::init(inOwner, (IOEventSourceAction) inAction)) {
        return false;
    }

    if (KERN_SUCCESS
        != semaphore_create(kernel_task, &producerSema, SYNC_POLICY_FIFO, inSize)) {
        return false;
    }

    size = inSize + 1; /* Allocate one more entry than needed */

    queue = (void *)kalloc(size * sizeof(commandEntryT));
    if (!queue) {
        return false;
    }

    producerLock = IOLockAlloc();
    if (!producerLock) {
        return false;
    }

    producerIndex = consumerIndex = 0;

    IOStatisticsInitializeCounter();

    return true;
}

IOCommandQueue *
IOCommandQueue::commandQueue(OSObject *inOwner,
    IOCommandQueueAction inAction,
    int inSize)
{
    IOCommandQueue *me = new IOCommandQueue;

    if (me && !me->init(inOwner, inAction, inSize)) {
        me->free();
        return NULL;
    }

    return me;
}
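
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver creates a queue with the commandQueue() factory, attaches it to
 * its work loop, and later enqueues work from arbitrary threads. MyDriver,
 * myAction, cmdQueue and the field meanings are assumptions made up for
 * the example; only IOCommandQueue, IOWorkLoop and IOCommandQueueAction
 * come from IOKit.
 *
 *     static void
 *     myAction(OSObject *owner, void *field0, void *field1,
 *         void *field2, void *field3)
 *     {
 *         // Runs on the owning work loop thread; the four fields carry
 *         // the producer's payload verbatim.
 *     }
 *
 *     bool
 *     MyDriver::setupQueue(IOWorkLoop *workLoop)
 *     {
 *         // Allow up to 8 outstanding commands before producers block.
 *         cmdQueue = IOCommandQueue::commandQueue(this, &myAction, 8);
 *         if (!cmdQueue) {
 *             return false;
 *         }
 *         if (workLoop->addEventSource(cmdQueue) != kIOReturnSuccess) {
 *             cmdQueue->release();
 *             cmdQueue = NULL;
 *             return false;
 *         }
 *         return true;
 *     }
 */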

/*[
 * free
 * - void free()
 *
 * Mandatory free of the object, independent of the current retain count.
 * Releases the command buffer, the producer semaphore, and the producer
 * lock before calling the superclass free().
 * ]*/
void
IOCommandQueue::free()
{
    if (queue) {
        kfree(queue, size * sizeof(commandEntryT));
    }
    if (producerSema) {
        semaphore_destroy(kernel_task, producerSema);
    }
    if (producerLock) {
        IOLockFree(producerLock);
    }

    super::free();
}

#if NUM_FIELDS_IN_COMMAND != 4
#error IOCommandQueue::checkForWork needs to be updated for new command size
#endif

bool
IOCommandQueue::checkForWork()
{
    void *field0, *field1, *field2, *field3;
    bool trace = (gIOKitTrace & kIOTraceCommandGates) ? true : false;

    if (!enabled || consumerIndex == producerIndex) {
        return false;
    }

    /* Copy out the next entry, then free its slot for waiting producers */
    {
        commandEntryT *q = (commandEntryT *) queue;
        int localIndex = consumerIndex;

        field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1];
        field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3];
        semaphore_signal(producerSema);
    }

    if (++consumerIndex >= size) {
        consumerIndex = 0;
    }

    if (trace) {
        IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION),
            VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));
    }

    IOStatisticsActionCall();
    (*(IOCommandQueueAction) action)(owner, field0, field1, field2, field3);

    if (trace) {
        IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION),
            VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));
    }

    /* Ask the work loop to call us again if more commands are pending */
    return consumerIndex != producerIndex;
}

/*[
 * enqueueCommand(gotoSleep, field0, field1, field2, field3)
 * - kern_return_t enqueueCommand(bool gotoSleep,
 *                                void *field0, void *field1,
 *                                void *field2, void *field3)
 *
 * Key method that enqueues the four input fields onto the command queue
 * and calls signalWorkAvailable() to indicate that work is available to
 * the consumer. This routine is safe against multiple threaded producers.
 *
 * If gotoSleep is true and the queue is full, the caller blocks on the
 * producer semaphore until a slot is freed; otherwise the call returns
 * immediately with the semaphore_timedwait() error code.
 *
 * A family of convenience functions has been provided to assist with
 * enqueueing a method selector and an integer tag. This relies on the
 * IODevice rawCommandOccurred... command to forward the requests.
 *
 * A usage sketch follows the implementation below.
 *
 * See also: signalWorkAvailable, checkForWork
 * ]*/
#if NUM_FIELDS_IN_COMMAND != 4
#error IOCommandQueue::enqueueCommand needs to be updated
#endif

kern_return_t
IOCommandQueue::enqueueCommand(bool gotoSleep,
    void *field0, void *field1,
    void *field2, void *field3)
{
    kern_return_t rtn = KERN_SUCCESS;
    int retry;

    /* Make sure there is room in the queue before doing anything else */

    if (gotoSleep) {
        retry = 0;
        do {
            rtn = semaphore_wait(producerSema);
        } while ((KERN_SUCCESS != rtn)
            && (KERN_OPERATION_TIMED_OUT != rtn)
            && (KERN_SEMAPHORE_DESTROYED != rtn)
            && (KERN_TERMINATED != rtn)
            && ((retry++) < 4));
    } else {
        rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO);
    }

    if (KERN_SUCCESS != rtn) {
        return rtn;
    }

    /* Block other producers */
    IOTakeLock(producerLock);

    /*
     * Make sure that we update the current producer entry before we
     * increment the producer index. This avoids a nasty race, as the
     * test for work is producerIndex != consumerIndex and a signal.
     */
    {
        commandEntryT *q = (commandEntryT *) queue;
        int localIndex = producerIndex;

        q[localIndex].f[0] = field0; q[localIndex].f[1] = field1;
        q[localIndex].f[2] = field2; q[localIndex].f[3] = field3;
    }
    if (++producerIndex >= size) {
        producerIndex = 0;
    }

    /* Release the lock to allow other producers to proceed */
    IOUnlock(producerLock);

    /*
     * Now that we have created some new work, make sure that we notify
     * the work loop that it has to test producerIndex.
     */
    signalWorkAvailable();
    return rtn;
}
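
/*
 * Producer-side sketch (illustrative only): a hypothetical client posts a
 * command from any thread. MyDriver, postCommand, cmdQueue, and the
 * command/data pointers are assumptions carried over from the earlier
 * sketch; only enqueueCommand() comes from this class.
 *
 *     kern_return_t
 *     MyDriver::postCommand(void *command, void *data)
 *     {
 *         // gotoSleep == true: block on the producer semaphore until a
 *         // slot frees up. Pass false to fail immediately with the
 *         // semaphore_timedwait() return code instead.
 *         return cmdQueue->enqueueCommand(true, command, data, NULL, NULL);
 *     }
 */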

int
IOCommandQueue::performAndFlush(OSObject *target,
    IOCommandQueueAction inAction)
{
    int numEntries;
    kern_return_t rtn;

    // Set the defaults if necessary
    if (!target) {
        target = owner;
    }
    if (!inAction) {
        inAction = (IOCommandQueueAction) action;
    }

    // Lock out the producers first by draining the producer semaphore
    do {
        rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO);
    } while (rtn == KERN_SUCCESS);

    // Now step over all remaining entries in the command queue
    for (numEntries = 0; consumerIndex != producerIndex; numEntries++) {
        void *field0, *field1, *field2, *field3;

        {
            commandEntryT *q = (commandEntryT *) queue;
            int localIndex = consumerIndex;

            field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1];
            field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3];
        }

        if (++consumerIndex >= size) {
            consumerIndex = 0;
        }

        (*inAction)(target, field0, field1, field2, field3);
    }

    // Finally, refill the producer semaphore to size - 1
    for (int i = 1; i < size; i++) {
        semaphore_signal(producerSema);
    }

    return numEntries;
}
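
/*
 * Teardown sketch (illustrative only): before releasing the queue, a
 * hypothetical driver can drain any commands still pending so their
 * payloads are not lost. Passing NULL for both arguments makes
 * performAndFlush() fall back to the owner and action supplied at init
 * time; MyDriver and cmdQueue are assumptions carried over from the
 * earlier sketches.
 *
 *     void
 *     MyDriver::teardownQueue(IOWorkLoop *workLoop)
 *     {
 *         if (cmdQueue) {
 *             // Run the default action on every queued entry, then
 *             // detach and release the event source.
 *             cmdQueue->performAndFlush(NULL, NULL);
 *             workLoop->removeEventSource(cmdQueue);
 *             cmdQueue->release();
 *             cmdQueue = NULL;
 *         }
 *     }
 */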

#endif /* !defined(__LP64__) */