/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

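/*
 * An IOSharedDataQueue is a lock-free, single-producer/single-consumer
 * circular buffer whose backing store can be mapped into a user process.
 * The shared region is laid out as the IODataQueueMemory header (queueSize,
 * head, tail) followed by the queue data, with an IODataQueueAppendix
 * placed after the data to carry the notification port.
 *
 * Illustrative kernel-side usage (a sketch only; the port and event
 * variables are hypothetical, not part of this file):
 *
 *     IOSharedDataQueue *queue = IOSharedDataQueue::withCapacity(16 * 1024);
 *     if (queue) {
 *         queue->setNotificationPort(port);       // from registerNotificationPort()
 *         queue->enqueue(&event, sizeof(event));  // producer side
 *         // Return queue->getMemoryDescriptor() from the user client's
 *         // clientMemoryForType() so the consumer can map and dequeue.
 *     }
 */
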
IOSharedDataQueue *IOSharedDataQueue::withCapacity(UInt32 size)
{
    IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithCapacity(size)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}

IOSharedDataQueue *IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
    IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithEntries(numEntries, entrySize)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}

Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
{
    IODataQueueAppendix *appendix;
    vm_size_t            allocSize;

    if (!super::init()) {
        return false;
    }

    _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
    if (!_reserved) {
        return false;
    }

    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
        return false;
    }

    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);
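    // Illustrative arithmetic: with 4 KiB pages, a request of size = 100
    // still consumes one full page, since the header and appendix add only
    // a few dozen bytes before round_page() rounds up to a page boundary.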

    if (allocSize < size) {
        return false;
    }

    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
    if (dataQueue == 0) {
        return false;
    }
    bzero(dataQueue, allocSize);

    dataQueue->queueSize = size;
    // dataQueue->head and dataQueue->tail were zeroed by the bzero() above.

    if (!setQueueSize(size)) {
        return false;
    }

    appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
    appendix->version = 0;

    if (!notifyMsg) {
        notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
        if (!notifyMsg) {
            return false;
        }
    }
    bzero(notifyMsg, sizeof(mach_msg_header_t));

    setNotificationPort(MACH_PORT_NULL);

    return true;
}

void IOSharedDataQueue::free()
{
    if (dataQueue) {
        IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
        dataQueue = NULL;
        if (notifyMsg) {
            IOFree(notifyMsg, sizeof(mach_msg_header_t));
            notifyMsg = NULL;
        }
    }

    if (_reserved) {
        IOFree(_reserved, sizeof(struct ExpansionData));
        _reserved = NULL;
    }

    super::free();
}

IOMemoryDescriptor *IOSharedDataQueue::getMemoryDescriptor()
{
    IOMemoryDescriptor *descriptor = 0;

    if (dataQueue != 0) {
        descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
    }

    return descriptor;
}

IODataQueueEntry * IOSharedDataQueue::peek()
{
    IODataQueueEntry *entry = 0;
    UInt32            headOffset;
    UInt32            tailOffset;

    if (!dataQueue) {
        return NULL;
    }

    // Read head and tail with acquire barrier
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *head      = 0;
        UInt32            headSize  = 0;
        UInt32            queueSize = getQueueSize();

        if (headOffset >= queueSize) {
            return NULL;
        }

        head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize = head->size;

        // Check if there's enough room before the end of the queue for a header.
        // If there is room, check if there's enough room to hold the header and
        // the data.
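        // Worked example (illustrative numbers): with queueSize = 4096 and
        // headOffset so close to 4096 that not even an entry header fits
        // (first two checks), or where a header fits but head->size would
        // run past the end (last two checks), the live entry is the one
        // that was written back at offset 0.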

        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // No room for the header or the data, wrap to the beginning of the queue.
            // Note: wrapping even with the UINT32_MAX checks, as we have to support
            // queueSize of UINT32_MAX
            entry = dataQueue->queue;
        } else {
            entry = head;
        }
    }

    return entry;
}

Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry * entry;

    if (!dataQueue) {
        return false;
    }

    // Force a single read of head and tail
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }
    // Check for underflow of (getQueueSize() - tail)
    if (getQueueSize() < tail || getQueueSize() < head) {
        return false;
    }

    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= getQueueSize()) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.

            newTail = tail + entrySize;
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.
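        // (head == tail is the empty state, so the producer must leave at
        // least one byte of separation. For example, with head = 100 and
        // tail = 60, an entry with entrySize = 40 is rejected even though
        // exactly 40 bytes remain.)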

        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Update tail with release barrier
    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

    // Send notification (via mach message) that data is available.

    if ( ( tail == head )   /* queue was empty prior to enqueue() */
      || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE) ) )   /* queue was emptied during enqueue() */
    {
        sendDataAvailableNotification();
    }

    return true;
}
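
/*
 * Illustrative user-space consumer (a sketch only, using the public
 * IODataQueueClient calls from IOKit.framework; the connect handle, mapping
 * step, and buffer are hypothetical, not part of this file):
 *
 *     mach_port_t port = IODataQueueAllocateNotificationPort();
 *     IOConnectSetNotificationPort(connect, 0, port, 0);
 *     // ... map the region from getMemoryDescriptor() via IOConnectMapMemory ...
 *     IODataQueueMemory *queue = (IODataQueueMemory *)address;
 *     while (IODataQueueWaitForAvailableData(queue, port) == kIOReturnSuccess) {
 *         uint32_t size = sizeof(buffer);
 *         while (IODataQueueDequeue(queue, buffer, &size) == kIOReturnSuccess) {
 *             // consume one entry, then reset size before the next call
 *             size = sizeof(buffer);
 *         }
 *     }
 */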

Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
    Boolean             retVal        = TRUE;
    IODataQueueEntry *  entry         = 0;
    UInt32              entrySize     = 0;
    UInt32              headOffset    = 0;
    UInt32              tailOffset    = 0;
    UInt32              newHeadOffset = 0;

    if (!dataQueue) {
        return false;
    }

    // Read head and tail with acquire barrier
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *  head      = 0;
        UInt32              headSize  = 0;
        UInt32              queueSize = getQueueSize();

        if (headOffset > queueSize) {
            return false;
        }

        head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize = head->size;

        // We wrapped around to the beginning, so read from there:
        // either there was not even room for the header
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            // or there was room for the header, but not for the data
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry = dataQueue->queue;
            entrySize = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return false;
            }
            newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
            // else it is at the end
        } else {
            entry = head;
            entrySize = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return false;
            }
            newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
    }

    if (entry) {
        if (data) {
            if (dataSize) {
                if (entrySize <= *dataSize) {
                    memcpy(data, &(entry->data), entrySize);
                    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
                } else {
                    retVal = FALSE;
                }
            } else {
                retVal = FALSE;
            }
        } else {
            __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
        }

        if (dataSize) {
            *dataSize = entrySize;
        }
    } else {
        retVal = FALSE;
    }

    return retVal;
}
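
// Note: the release store on head above pairs with the producer's acquire
// load in enqueue(), which is what makes the lock-free head/tail handoff
// safe with exactly one producer and one consumer.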

UInt32 IOSharedDataQueue::getQueueSize()
{
    if (!_reserved) {
        return 0;
    }
    return _reserved->queueSize;
}

Boolean IOSharedDataQueue::setQueueSize(UInt32 size)
{
    if (!_reserved) {
        return false;
    }
    _reserved->queueSize = size;
    return true;
}

403
404 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
405 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
406 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
407 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
408 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
409 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
410 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
411 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);