/* apple/xnu xnu-6153.141.1: iokit/System/IODataQueueDispatchSourceShared.h */

// A single variable-length record: a 4-byte size header followed by the payload.
typedef struct _IODataQueueEntry {
    uint32_t size;
    uint8_t  data[0];
} IODataQueueEntry;

#define DATA_QUEUE_ENTRY_HEADER_SIZE sizeof(IODataQueueEntry)

// Shared ring-buffer header. head and tail are byte offsets into queue[]:
// the producer advances tail, the consumer advances head. needServicedCallback
// is set by a full producer to request a DataServiced callback after the next dequeue.
typedef struct _IODataQueueMemory {
    volatile uint32_t head;
    volatile uint32_t tail;
    volatile uint8_t  needServicedCallback;
    volatile uint8_t  _resv[31];
    IODataQueueEntry  queue[0];
} IODataQueueMemory;

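/*
 * Illustrative sketch (not part of the original file): each record occupies its
 * 4-byte header plus the payload rounded up to a 4-byte boundary, mirroring the
 * arithmetic used by the enqueue/dequeue paths below. `entrySpaceForExample` is
 * a hypothetical helper, shown only to make the layout concrete.
 *
 *     static inline uint32_t
 *     entrySpaceForExample(uint32_t callerDataSize)
 *     {
 *         uint32_t dataSize, entrySize;
 *         if (os_add_overflow(callerDataSize, 3, &dataSize)) {
 *             return UINT32_MAX;                  // treat as "does not fit"
 *         }
 *         dataSize &= ~3U;                        // round payload up to 4 bytes
 *         if (os_add_overflow(DATA_QUEUE_ENTRY_HEADER_SIZE, dataSize, &entrySize)) {
 *             return UINT32_MAX;
 *         }
 *         return entrySize;
 *     }
 *
 * For example, a 6-byte payload consumes 4 + 8 = 12 bytes of queue space.
 */
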
struct IODataQueueDispatchSource_IVars {
    IODataQueueMemory         * dataQueue;
    IODataQueueDispatchSource * source;
//  IODispatchQueue           * queue;
    IOMemoryDescriptor        * memory;
    OSAction                  * dataAvailableAction;
    OSAction                  * dataServicedAction;
    uint64_t                    options;
    uint32_t                    queueByteCount;

#if !KERNEL
    bool                        enable;
    bool                        canceled;
#endif
};

bool
IODataQueueDispatchSource::init()
{
    if (!super::init()) {
        return false;
    }

    ivars = IONewZero(IODataQueueDispatchSource_IVars, 1);
    ivars->source = this;

#if !KERNEL
    // In user space, obtain the queue's shared memory from the kernel object
    // and map it into this process.
    kern_return_t ret;

    ret = CopyMemory(&ivars->memory);
    assert(kIOReturnSuccess == ret);

    uint64_t address;
    uint64_t length;

    ret = ivars->memory->Map(0, 0, 0, 0, &address, &length);
    assert(kIOReturnSuccess == ret);
    ivars->dataQueue = (typeof(ivars->dataQueue))(uintptr_t) address;
    ivars->queueByteCount = length;
#endif

    return true;
}

kern_return_t
IMPL(IODataQueueDispatchSource, CheckForWork)
{
    IOReturn ret = kIOReturnNotReady;

    return ret;
}

#if KERNEL

kern_return_t
IMPL(IODataQueueDispatchSource, Create)
{
    IODataQueueDispatchSource * inst;
    IOBufferMemoryDescriptor  * bmd;

    // The queue size must be a multiple of 4 bytes.
    if (3 & queueByteCount) {
        return kIOReturnBadArgument;
    }
    inst = OSTypeAlloc(IODataQueueDispatchSource);
    if (!inst) {
        return kIOReturnNoMemory;
    }
    if (!inst->init()) {
        inst->release();
        return kIOReturnError;
    }

    bmd = IOBufferMemoryDescriptor::withOptions(
        kIODirectionOutIn | kIOMemoryKernelUserShared,
        queueByteCount, page_size);
    if (!bmd) {
        inst->release();
        return kIOReturnNoMemory;
    }
    inst->ivars->memory         = bmd;
    inst->ivars->queueByteCount = queueByteCount;
    inst->ivars->options        = 0;
    inst->ivars->dataQueue      = (typeof(inst->ivars->dataQueue)) bmd->getBytesNoCopy();

    *source = inst;

    return kIOReturnSuccess;
}

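/*
 * Illustrative caller-side sketch (not from this file): a driver would
 * typically create the source on its dispatch queue and install a
 * data-available handler. The exact Create()/SetDataAvailableHandler()
 * parameters are declared in IODataQueueDispatchSource.iig, not here;
 * `kQueueBytes`, `driverQueue`, and `dataAvailableAction` are hypothetical
 * placeholders.
 *
 *     IODataQueueDispatchSource * source = NULL;
 *     kern_return_t ret;
 *
 *     ret = IODataQueueDispatchSource::Create(kQueueBytes, driverQueue, &source);
 *     if (kIOReturnSuccess == ret) {
 *         ret = source->SetDataAvailableHandler(dataAvailableAction);
 *     }
 */
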
kern_return_t
IMPL(IODataQueueDispatchSource, CopyMemory)
{
    kern_return_t ret;
    IOMemoryDescriptor * result;

    result = ivars->memory;
    if (result) {
        result->retain();
        ret = kIOReturnSuccess;
    } else {
        ret = kIOReturnNotReady;
    }
    *memory = result;

    return ret;
}

kern_return_t
IMPL(IODataQueueDispatchSource, CopyDataAvailableHandler)
{
    kern_return_t ret;
    OSAction * result;

    result = ivars->dataAvailableAction;
    if (result) {
        result->retain();
        ret = kIOReturnSuccess;
    } else {
        ret = kIOReturnNotReady;
    }
    *action = result;

    return ret;
}

kern_return_t
IMPL(IODataQueueDispatchSource, CopyDataServicedHandler)
{
    kern_return_t ret;
    OSAction * result;

    result = ivars->dataServicedAction;
    if (result) {
        result->retain();
        ret = kIOReturnSuccess;
    } else {
        ret = kIOReturnNotReady;
    }
    *action = result;
    return ret;
}

kern_return_t
IMPL(IODataQueueDispatchSource, SetDataAvailableHandler)
{
    IOReturn ret;
    OSAction * oldAction;

    oldAction = ivars->dataAvailableAction;
    if (oldAction && OSCompareAndSwapPtr(oldAction, NULL, &ivars->dataAvailableAction)) {
        oldAction->release();
    }
    if (action) {
        action->retain();
        ivars->dataAvailableAction = action;
        // If data is already pending, deliver a callback to the new handler right away.
        if (IsDataAvailable()) {
            DataAvailable(ivars->dataAvailableAction);
        }
    }
    ret = kIOReturnSuccess;

    return ret;
}

kern_return_t
IMPL(IODataQueueDispatchSource, SetDataServicedHandler)
{
    IOReturn ret;
    OSAction * oldAction;

    oldAction = ivars->dataServicedAction;
    if (oldAction && OSCompareAndSwapPtr(oldAction, NULL, &ivars->dataServicedAction)) {
        oldAction->release();
    }
    if (action) {
        action->retain();
        ivars->dataServicedAction = action;
    }
    ret = kIOReturnSuccess;

    return ret;
}

#endif /* KERNEL */

void
IODataQueueDispatchSource::SendDataAvailable(void)
{
    IOReturn ret;

    // Lazily fetch the registered handler if we do not hold a reference yet.
    if (!ivars->dataAvailableAction) {
        ret = CopyDataAvailableHandler(&ivars->dataAvailableAction);
        if (kIOReturnSuccess != ret) {
            ivars->dataAvailableAction = NULL;
        }
    }
    if (ivars->dataAvailableAction) {
        DataAvailable(ivars->dataAvailableAction);
    }
}

void
IODataQueueDispatchSource::SendDataServiced(void)
{
    IOReturn ret;

    if (!ivars->dataServicedAction) {
        ret = CopyDataServicedHandler(&ivars->dataServicedAction);
        if (kIOReturnSuccess != ret) {
            ivars->dataServicedAction = NULL;
        }
    }
    if (ivars->dataServicedAction) {
        ivars->dataQueue->needServicedCallback = false;
        DataServiced(ivars->dataServicedAction);
    }
}

kern_return_t
IMPL(IODataQueueDispatchSource, SetEnableWithCompletion)
{
    IOReturn ret;

#if !KERNEL
    ivars->enable = enable;
#endif

    ret = kIOReturnSuccess;
    return ret;
}

void
IODataQueueDispatchSource::free()
{
    OSSafeReleaseNULL(ivars->memory);
    OSSafeReleaseNULL(ivars->dataAvailableAction);
    OSSafeReleaseNULL(ivars->dataServicedAction);
    IOSafeDeleteNULL(ivars, IODataQueueDispatchSource_IVars, 1);
    super::free();
}

kern_return_t
IMPL(IODataQueueDispatchSource, Cancel)
{
    return kIOReturnSuccess;
}

bool
IODataQueueDispatchSource::IsDataAvailable(void)
{
    IODataQueueMemory *dataQueue = ivars->dataQueue;

    return dataQueue && (dataQueue->head != dataQueue->tail);
}

// Peek at the oldest entry without consuming it (head is not advanced).
kern_return_t
IODataQueueDispatchSource::Peek(IODataQueueClientDequeueEntryBlock callback)
{
    IODataQueueEntry *  entry = NULL;
    IODataQueueMemory * dataQueue;
    uint32_t            callerDataSize;
    uint32_t            dataSize;
    uint32_t            headOffset;
    uint32_t            tailOffset;

    dataQueue = ivars->dataQueue;
    if (!dataQueue) {
        return kIOReturnNoMemory;
    }

    // Read head and tail with acquire barrier
    headOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry * head = NULL;
        uint32_t           headSize = 0;
        uint32_t           queueSize = ivars->queueByteCount;

        if (headOffset > queueSize) {
            return kIOReturnError;
        }

        head = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + headOffset);
        callerDataSize = head->size;
        if (os_add_overflow(3, callerDataSize, &headSize)) {
            return kIOReturnError;
        }
        headSize &= ~3U;

        // Check if there's enough room before the end of the queue for a header.
        // If there is room, check if there's enough room to hold the header and
        // the data.

        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // No room for the header or the data, wrap to the beginning of the queue.
            // Note: wrapping even with the UINT32_MAX checks, as we have to support
            // queueSize of UINT32_MAX
            entry = dataQueue->queue;
            callerDataSize = entry->size;
            dataSize = entry->size;
            if (os_add_overflow(3, callerDataSize, &dataSize)) {
                return kIOReturnError;
            }
            dataSize &= ~3U;

            if ((dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return kIOReturnError;
            }

            callback(&entry->data, callerDataSize);
            return kIOReturnSuccess;
        } else {
            callback(&head->data, callerDataSize);
            return kIOReturnSuccess;
        }
    }

    return kIOReturnUnderrun;
}

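/*
 * Illustrative use of Peek() (not from this file): inspect the oldest entry
 * without consuming it. The block parameters mirror the invocation inside
 * Peek() above, callback(&entry->data, callerDataSize); the exact block
 * typedef lives in IODataQueueDispatchSource.iig. `InspectBytes` is a
 * hypothetical helper.
 *
 *     kern_return_t ret = source->Peek(^(const void * data, size_t dataSize) {
 *         InspectBytes(data, dataSize);
 *     });
 *     // kIOReturnUnderrun means the queue was empty.
 */
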
kern_return_t
IODataQueueDispatchSource::Dequeue(IODataQueueClientDequeueEntryBlock callback)
{
    kern_return_t ret;
    bool sendDataServiced;

    sendDataServiced = false;
    ret = DequeueWithCoalesce(&sendDataServiced, callback);
    if (sendDataServiced) {
        SendDataServiced();
    }
    return ret;
}

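/*
 * Illustrative drain loop (not from this file): pull entries until Dequeue()
 * reports underrun. `ConsumeBytes` is a hypothetical consumer.
 *
 *     kern_return_t ret;
 *     do {
 *         ret = source->Dequeue(^(const void * data, size_t dataSize) {
 *             ConsumeBytes(data, dataSize);
 *         });
 *     } while (kIOReturnSuccess == ret);
 *     // The loop ends with kIOReturnUnderrun once the queue is empty.
 */
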
kern_return_t
IODataQueueDispatchSource::DequeueWithCoalesce(bool * sendDataServiced,
    IODataQueueClientDequeueEntryBlock callback)
{
    IOReturn            retVal = kIOReturnSuccess;
    IODataQueueEntry *  entry = NULL;
    IODataQueueMemory * dataQueue;
    uint32_t            callerDataSize;
    uint32_t            dataSize = 0;
    uint32_t            headOffset = 0;
    uint32_t            tailOffset = 0;
    uint32_t            newHeadOffset = 0;

    dataQueue = ivars->dataQueue;
    if (!dataQueue) {
        return kIOReturnNoMemory;
    }

    // Read head and tail with acquire barrier
    headOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry * head = NULL;
        uint32_t           headSize = 0;
        uint32_t           queueSize = ivars->queueByteCount;

        if (headOffset > queueSize) {
            return kIOReturnError;
        }

        head = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + headOffset);
        callerDataSize = head->size;
        if (os_add_overflow(3, callerDataSize, &headSize)) {
            return kIOReturnError;
        }
        headSize &= ~3U;

        // The entry wrapped around to the beginning of the queue if
        // there was not even room for the header at the end,
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            // or there was room for the header, but not for the data.
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // In that case read from the beginning of the queue.
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry = dataQueue->queue;
            callerDataSize = entry->size;

            if (os_add_overflow(callerDataSize, 3, &dataSize)) {
                return kIOReturnError;
            }
            dataSize &= ~3U;
            if ((dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return kIOReturnError;
            }
            newHeadOffset = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
            // else it is at the end
        } else {
            entry = head;

            if ((headSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (headSize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return kIOReturnError;
            }
            newHeadOffset = headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
    } else {
        // empty queue
        if (dataQueue->needServicedCallback) {
            *sendDataServiced = true;
        }
        return kIOReturnUnderrun;
    }

    callback(&entry->data, callerDataSize);
    if (dataQueue->needServicedCallback) {
        *sendDataServiced = true;
    }

    __c11_atomic_store((_Atomic uint32_t *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

    if (newHeadOffset == tailOffset) {
        //
        // If we are making the queue empty, then we need to make sure
        // that either the enqueuer notices, or we notice the enqueue
        // that raced with our making of the queue empty.
        //
        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
    }

    return retVal;
}

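/*
 * Worked example of the head-advance arithmetic above (illustrative, not from
 * this file), assuming queueByteCount = 1024 and DATA_QUEUE_ENTRY_HEADER_SIZE = 4:
 *
 *   - headOffset = 100, head->size = 6: headSize rounds to 8, and
 *     100 + 8 + 4 = 112 <= 1024, so the entry is read in place and
 *     newHeadOffset = 112.
 *
 *   - headOffset = 1016, head->size = 8: 1016 + 4 + 8 = 1028 > 1024, so no
 *     record could have been written there; the real record sits at offset 0
 *     and newHeadOffset becomes that record's rounded size plus the header.
 */
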
kern_return_t
IODataQueueDispatchSource::Enqueue(uint32_t callerDataSize,
    IODataQueueClientEnqueueEntryBlock callback)
{
    kern_return_t ret;
    bool sendDataAvailable;

    sendDataAvailable = false;
    ret = EnqueueWithCoalesce(callerDataSize, &sendDataAvailable, callback);
    if (sendDataAvailable) {
        SendDataAvailable();
    }
    return ret;
}

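/*
 * Illustrative producer-side sketch (not from this file): copy a payload into
 * the next free entry and let the source send the notification. `report` and
 * `reportLength` are hypothetical.
 *
 *     kern_return_t ret = source->Enqueue(reportLength,
 *         ^(void * data, size_t dataSize) {
 *             memcpy(data, report, dataSize);
 *         });
 *     if (kIOReturnOverrun == ret) {
 *         // Queue full: needServicedCallback is set, and a DataServiced
 *         // callback will follow once the consumer dequeues.
 *     }
 */
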
kern_return_t
IODataQueueDispatchSource::EnqueueWithCoalesce(uint32_t callerDataSize,
    bool * sendDataAvailable,
    IODataQueueClientEnqueueEntryBlock callback)
{
    IODataQueueMemory * dataQueue;
    IODataQueueEntry *  entry;
    uint32_t            head;
    uint32_t            tail;
    uint32_t            newTail;
    uint32_t            dataSize;
    uint32_t            queueSize;
    uint32_t            entrySize;
    IOReturn            retVal = kIOReturnSuccess;

    dataQueue = ivars->dataQueue;
    if (!dataQueue) {
        return kIOReturnNoMemory;
    }
    queueSize = ivars->queueByteCount;

    // Force a single read of head and tail
    tail = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_ACQUIRE);

    if (os_add_overflow(callerDataSize, 3, &dataSize)) {
        return kIOReturnOverrun;
    }
    dataSize &= ~3U;

    // Check for overflow of entrySize
    if (os_add_overflow(DATA_QUEUE_ENTRY_HEADER_SIZE, dataSize, &entrySize)) {
        return kIOReturnOverrun;
    }

    // Check for underflow of (getQueueSize() - tail)
    if (queueSize < tail || queueSize < head) {
        return kIOReturnUnderrun;
    }

    newTail = tail;
    if (tail >= head) {
        // Is there enough room at the end for the entry?
        if ((entrySize <= (UINT32_MAX - tail)) &&
            ((tail + entrySize) <= queueSize)) {
            entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail);

            callback(&entry->data, callerDataSize);

            entry->size = callerDataSize;

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to queueSize inclusive.

            newTail = tail + entrySize;
        } else if (head > entrySize) { // Is there enough room at the beginning?
            entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue);

            callback(&entry->data, callerDataSize);

            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            entry->size = callerDataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
                ((IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail))->size = dataSize;
            }

            newTail = entrySize;
        } else {
            retVal = kIOReturnOverrun; // queue is full
        }
    } else {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ((head - tail) > entrySize) {
            entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail);

            callback(&entry->data, callerDataSize);

            entry->size = callerDataSize;

            newTail = tail + entrySize;
        } else {
            retVal = kIOReturnOverrun; // queue is full
        }
    }

    // Send notification (via mach message) that data is available.

    if (retVal == kIOReturnSuccess) {
        // Publish the data we just enqueued
        __c11_atomic_store((_Atomic uint32_t *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

        if (tail != head) {
            //
            // The memory barrier below pairs with the one in dequeue
            // so that either our store to the tail cannot be missed by
            // the next dequeue attempt, or we will observe the dequeuer
            // making the queue empty.
            //
            // Of course, if we already think the queue is empty,
            // there's no point paying this extra cost.
            //
            __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
            head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
        }

        if (tail == head) {
            // Send notification that data is now available.
            *sendDataAvailable = true;
            retVal = kIOReturnSuccess;
        }
    } else if (retVal == kIOReturnOverrun) {
        // ask to be notified of Dequeue()
        dataQueue->needServicedCallback = true;
        *sendDataAvailable = true;
    }

    return retVal;
}
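
/*
 * Worked example of the wrap decision above (illustrative, not from this
 * file), assuming queueByteCount = 1024 and DATA_QUEUE_ENTRY_HEADER_SIZE = 4:
 *
 *   - tail = 1016, head = 200, callerDataSize = 6: dataSize rounds to 8 and
 *     entrySize = 12, so the entry does not fit at the end (1016 + 12 > 1024).
 *     Since head (200) > entrySize (12), the record is written at offset 0,
 *     the stale header at offset 1016 is given size = 8 so a reader knows to
 *     wrap, and newTail = 12.
 *
 *   - tail = head = 100 (queue appears empty), callerDataSize = 6:
 *     entrySize = 12 fits at the end, newTail = 112, and because tail == head
 *     the caller is asked to send the DataAvailable notification.
 *
 * The SEQ_CST fence pairs with the one in DequeueWithCoalesce(): either the
 * dequeuer observes the new tail, or this enqueuer observes the dequeuer's
 * updated head and requests the DataAvailable notification itself.
 */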