/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

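/*
 * enqueue and dequeue may already be defined as macros elsewhere in the
 * kernel (e.g. by <kern/queue.h>); undefine them so they cannot mangle the
 * method names declared below.
 */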
#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

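/*
 * Factory methods. withCapacity() takes the payload capacity in bytes;
 * withEntries() is a convenience that lets the IODataQueue superclass
 * derive a byte capacity from a fixed entry count and entry size.
 *
 * Illustrative driver-side usage (a sketch; the surrounding variables are
 * hypothetical):
 *
 *     IOSharedDataQueue *queue = IOSharedDataQueue::withCapacity(16 * 1024);
 *     if (queue) {
 *         queue->setNotificationPort(clientPort); // from the user client
 *         queue->enqueue(&event, sizeof(event));
 *     }
 */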
IOSharedDataQueue *IOSharedDataQueue::withCapacity(UInt32 size)
{
    IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithCapacity(size)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}

IOSharedDataQueue *IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
    IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithEntries(numEntries, entrySize)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}

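/*
 * The shared region mapped into the client is laid out as
 *
 *     | IODataQueueMemory header | queue data ("size" bytes) | appendix |
 *
 * rounded up to a whole number of pages. Only the appendix's version field
 * is initialized here; the Mach notification message (notifyMsg) is kept in
 * kernel-private memory rather than in the shared region.
 */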
Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
{
    IODataQueueAppendix * appendix;
    vm_size_t             allocSize;

    if (!super::init()) {
        return false;
    }

    _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
    if (!_reserved) {
        return false;
    }

    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
        return false;
    }

    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

    if (allocSize < size) {
        return false;
    }

    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
    if (dataQueue == 0) {
        return false;
    }
    bzero(dataQueue, allocSize);

    dataQueue->queueSize = size;
//  dataQueue->head      = 0;   // already zeroed by the bzero() above
//  dataQueue->tail      = 0;

    if (!setQueueSize(size)) {
        return false;
    }

    appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
    appendix->version = 0;

    if (!notifyMsg) {
        notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
        if (!notifyMsg)
            return false;
    }
    bzero(notifyMsg, sizeof(mach_msg_header_t));

    setNotificationPort(MACH_PORT_NULL);

    return true;
}

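/*
 * free() is both the normal teardown and the cleanup path taken when an
 * init method fails and the factory releases the half-constructed object,
 * so each resource is checked before being released.
 */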
void IOSharedDataQueue::free()
{
    if (dataQueue) {
        IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
        dataQueue = NULL;
        if (notifyMsg) {
            IOFree(notifyMsg, sizeof(mach_msg_header_t));
            notifyMsg = NULL;
        }
    }

    if (_reserved) {
        IOFree (_reserved, sizeof(struct ExpansionData));
        _reserved = NULL;
    }

    super::free();
}

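/*
 * Wraps the shared buffer in a memory descriptor so a user client can map
 * it into the client task (typically from the driver's user client, e.g.
 * in a clientMemoryForType() override; that plumbing lives outside this
 * class).
 */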
IOMemoryDescriptor *IOSharedDataQueue::getMemoryDescriptor()
{
    IOMemoryDescriptor *descriptor = 0;

    if (dataQueue != 0) {
        descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
    }

    return descriptor;
}

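/*
 * peek() returns a pointer to the next entry without consuming it, or NULL
 * if the queue is empty. head and tail live in memory the client task can
 * scribble on, so every offset is bounds- and overflow-checked before it is
 * dereferenced.
 */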
IODataQueueEntry * IOSharedDataQueue::peek()
{
    IODataQueueEntry *entry = 0;
    UInt32            headOffset;
    UInt32            tailOffset;

    if (!dataQueue) {
        return NULL;
    }

    // Read head and tail; the acquire on tail pairs with the release store
    // of tail in enqueue().
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry * head     = 0;
        UInt32             headSize = 0;
        UInt32             queueSize = getQueueSize();

        if (headOffset >= queueSize) {
            return NULL;
        }

        head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize = head->size;

        // Check if there's enough room before the end of the queue for a header.
        // If there is room, check if there's enough room to hold the header and
        // the data.

        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // No room for the header or the data, wrap to the beginning of the queue.
            // Note: wrapping even with the UINT32_MAX checks, as we have to support
            // queueSize of UINT32_MAX
            entry = dataQueue->queue;
        } else {
            entry = head;
        }
    }

    return entry;
}

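/*
 * enqueue() appends one variable-length entry at the tail. The design
 * assumes a single producer and a single consumer: one side only ever
 * writes tail, the other only ever writes head, so the entry can be filled
 * in with plain stores and then published by a single release store of the
 * new tail; no lock is taken.
 */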
Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry * entry;

    if (!dataQueue) {
        // Guard against an uninitialized queue, as peek() and dequeue() do.
        return false;
    }

    // Force a single read of head and tail
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }
    // Check for underflow of (getQueueSize() - tail)
    if (getQueueSize() < tail || getQueueSize() < head) {
        return false;
    }

    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= getQueueSize()) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.

            newTail = tail + entrySize;
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Publish the new tail with a release barrier so a consumer's acquire
    // load of tail observes the entry contents written above.
    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

    // Send notification (via mach message) that data is available.

    if ( ( tail == head )   /* queue was empty prior to enqueue() */
    ||   ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED) ) )   /* queue was emptied during enqueue() */
    {
        sendDataAvailableNotification();
    }

    return true;
}

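/*
 * dequeue() copies the head entry into the caller's buffer (when one is
 * supplied) and advances head. Passing a NULL data pointer consumes the
 * entry without copying; *dataSize is updated to the entry's size either
 * way, and FALSE is returned if the caller's buffer is too small.
 */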
Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
    Boolean            retVal        = TRUE;
    IODataQueueEntry * entry         = 0;
    UInt32             entrySize     = 0;
    UInt32             headOffset    = 0;
    UInt32             tailOffset    = 0;
    UInt32             newHeadOffset = 0;

    if (!dataQueue) {
        return false;
    }

    // Read head and tail with acquire barrier
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry * head     = 0;
        UInt32             headSize = 0;
        UInt32             queueSize = getQueueSize();

        if (headOffset > queueSize) {
            return false;
        }

        head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize = head->size;

        // we wrapped around to beginning, so read from there
        // either there was not even room for the header
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            // or there was room for the header, but not for the data
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry     = dataQueue->queue;
            entrySize = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return false;
            }
            newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        // else it is at the end
        } else {
            entry     = head;
            entrySize = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return false;
            }
            newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
    }

    if (entry) {
        if (data) {
            if (dataSize) {
                if (entrySize <= *dataSize) {
                    memcpy(data, &(entry->data), entrySize);
                    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
                } else {
                    retVal = FALSE;
                }
            } else {
                retVal = FALSE;
            }
        } else {
            __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
        }

        if (dataSize) {
            *dataSize = entrySize;
        }
    } else {
        retVal = FALSE;
    }

    return retVal;
}

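/*
 * The queue size is mirrored into kernel-private storage (_reserved) so the
 * bounds checks above never depend on dataQueue->queueSize, which sits in
 * the shared mapping and could be rewritten by the client.
 */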
UInt32 IOSharedDataQueue::getQueueSize()
{
    if (!_reserved) {
        return 0;
    }
    return _reserved->queueSize;
}

Boolean IOSharedDataQueue::setQueueSize(UInt32 size)
{
    if (!_reserved) {
        return false;
    }
    _reserved->queueSize = size;
    return true;
}

OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);