/* iokit/System/IODataQueueDispatchSourceShared.h (apple/xnu, xnu-7195.50.7.100.1) */

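/*
 * Shared memory layout for the data queue.  Producer and consumer share one
 * IODataQueueMemory region: `head` and `tail` are byte offsets into the
 * variable-length `queue` area, advanced by the dequeuer and the enqueuer
 * respectively.  Each record is an IODataQueueEntry: a 4-byte size header
 * followed by the payload, with payload lengths rounded up to a 4-byte
 * multiple.  `needServicedCallback` is set by an enqueuer that overran a full
 * queue, to request a DataServiced notification after the next dequeue.
 */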
typedef struct _IODataQueueEntry {
	uint32_t size;
	uint8_t data[0];
} IODataQueueEntry;

#define DATA_QUEUE_ENTRY_HEADER_SIZE sizeof(IODataQueueEntry)

typedef struct _IODataQueueMemory {
	volatile uint32_t head;
	volatile uint32_t tail;
	volatile uint8_t needServicedCallback;
	volatile uint8_t _resv[31];
	IODataQueueEntry queue[0];
} IODataQueueMemory;

struct IODataQueueDispatchSource_IVars {
	IODataQueueMemory * dataQueue;
	IODataQueueDispatchSource * source;
//	IODispatchQueue * queue;
	IOMemoryDescriptor * memory;
	OSAction * dataAvailableAction;
	OSAction * dataServicedAction;
	uint64_t options;
	uint32_t queueByteCount;

#if !KERNEL
	bool enable;
	bool canceled;
#endif
};

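/*
 * Common initialization.  In the kernel the queue memory is attached later by
 * Create_Impl(); in the user-space (driver) build, init() copies the memory
 * descriptor from the kernel object and maps the shared queue into the
 * caller's address space.
 */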
bool
IODataQueueDispatchSource::init()
{
	if (!super::init()) {
		return false;
	}

	ivars = IONewZero(IODataQueueDispatchSource_IVars, 1);
	ivars->source = this;

#if !KERNEL
	kern_return_t ret;

	ret = CopyMemory(&ivars->memory);
	assert(kIOReturnSuccess == ret);

	uint64_t address;
	uint64_t length;

	ret = ivars->memory->Map(0, 0, 0, 0, &address, &length);
	assert(kIOReturnSuccess == ret);
	ivars->dataQueue = (typeof(ivars->dataQueue))(uintptr_t) address;
	ivars->queueByteCount = length;
#endif

	return true;
}

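/* This source delivers work through the DataAvailable/DataServiced OSActions,
 * so the generic CheckForWork path reports nothing ready. */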
kern_return_t
IODataQueueDispatchSource::CheckForWork_Impl(
	const IORPC rpc,
	bool synchronous)
{
	IOReturn ret = kIOReturnNotReady;

	return ret;
}

#if KERNEL

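/*
 * Create a data queue dispatch source backed by a kernel/user shared buffer.
 * queueByteCount must be a multiple of 4 and at most UINT_MAX; the buffer is
 * allocated page-aligned with kIOMemoryKernelUserShared so that it can later
 * be mapped into the client's address space via CopyMemory()/Map().
 */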
kern_return_t
IODataQueueDispatchSource::Create_Impl(
	uint64_t queueByteCount,
	IODispatchQueue * queue,
	IODataQueueDispatchSource ** source)
{
	IODataQueueDispatchSource * inst;
	IOBufferMemoryDescriptor * bmd;

	if (3 & queueByteCount) {
		return kIOReturnBadArgument;
	}
	if (queueByteCount > UINT_MAX) {
		return kIOReturnBadArgument;
	}
	inst = OSTypeAlloc(IODataQueueDispatchSource);
	if (!inst) {
		return kIOReturnNoMemory;
	}
	if (!inst->init()) {
		inst->release();
		return kIOReturnError;
	}

	bmd = IOBufferMemoryDescriptor::withOptions(
		kIODirectionOutIn | kIOMemoryKernelUserShared,
		queueByteCount, page_size);
	if (!bmd) {
		inst->release();
		return kIOReturnNoMemory;
	}
	inst->ivars->memory = bmd;
	inst->ivars->queueByteCount = ((uint32_t) queueByteCount);
	inst->ivars->options = 0;
	inst->ivars->dataQueue = (typeof(inst->ivars->dataQueue))bmd->getBytesNoCopy();

	*source = inst;

	return kIOReturnSuccess;
}

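/* Return a retained reference to the shared queue memory descriptor, or
 * kIOReturnNotReady if the queue has not been created yet. */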
kern_return_t
IODataQueueDispatchSource::CopyMemory_Impl(
	IOMemoryDescriptor ** memory)
{
	kern_return_t ret;
	IOMemoryDescriptor * result;

	result = ivars->memory;
	if (result) {
		result->retain();
		ret = kIOReturnSuccess;
	} else {
		ret = kIOReturnNotReady;
	}
	*memory = result;

	return ret;
}

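/* The two Copy*Handler methods hand back a retained reference to the
 * currently installed OSAction, if any. */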
kern_return_t
IODataQueueDispatchSource::CopyDataAvailableHandler_Impl(
	OSAction ** action)
{
	kern_return_t ret;
	OSAction * result;

	result = ivars->dataAvailableAction;
	if (result) {
		result->retain();
		ret = kIOReturnSuccess;
	} else {
		ret = kIOReturnNotReady;
	}
	*action = result;

	return ret;
}

kern_return_t
IODataQueueDispatchSource::CopyDataServicedHandler_Impl(
	OSAction ** action)
{
	kern_return_t ret;
	OSAction * result;

	result = ivars->dataServicedAction;
	if (result) {
		result->retain();
		ret = kIOReturnSuccess;
	} else {
		ret = kIOReturnNotReady;
	}
	*action = result;
	return ret;
}

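/*
 * Install the DataAvailable handler.  Any previously installed action is
 * swapped out and released; if data is already pending when the new handler
 * is installed, a DataAvailable notification is generated immediately so the
 * client does not miss entries enqueued before registration.
 */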
kern_return_t
IODataQueueDispatchSource::SetDataAvailableHandler_Impl(
	OSAction * action)
{
	IOReturn ret;
	OSAction * oldAction;

	oldAction = ivars->dataAvailableAction;
	if (oldAction && OSCompareAndSwapPtr(oldAction, NULL, &ivars->dataAvailableAction)) {
		oldAction->release();
	}
	if (action) {
		action->retain();
		ivars->dataAvailableAction = action;
		if (IsDataAvailable()) {
			DataAvailable(ivars->dataAvailableAction);
		}
	}
	ret = kIOReturnSuccess;

	return ret;
}

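/* Install the DataServiced handler.  Unlike the DataAvailable case there is
 * no immediate callback; DataServiced only fires after a dequeue observes
 * needServicedCallback set by an overrunning enqueuer. */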
kern_return_t
IODataQueueDispatchSource::SetDataServicedHandler_Impl(
	OSAction * action)
{
	IOReturn ret;
	OSAction * oldAction;

	oldAction = ivars->dataServicedAction;
	if (oldAction && OSCompareAndSwapPtr(oldAction, NULL, &ivars->dataServicedAction)) {
		oldAction->release();
	}
	if (action) {
		action->retain();
		ivars->dataServicedAction = action;
	}
	ret = kIOReturnSuccess;

	return ret;
}

#endif /* KERNEL */

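/* Deliver a DataAvailable notification, lazily fetching the handler
 * (via CopyDataAvailableHandler) on first use. */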
void
IODataQueueDispatchSource::SendDataAvailable(void)
{
	IOReturn ret;

	if (!ivars->dataAvailableAction) {
		ret = CopyDataAvailableHandler(&ivars->dataAvailableAction);
		if (kIOReturnSuccess != ret) {
			ivars->dataAvailableAction = NULL;
		}
	}
	if (ivars->dataAvailableAction) {
		DataAvailable(ivars->dataAvailableAction);
	}
}

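/* Deliver a DataServiced notification, clearing the enqueuer's
 * needServicedCallback request before doing so. */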
void
IODataQueueDispatchSource::SendDataServiced(void)
{
	IOReturn ret;

	if (!ivars->dataServicedAction) {
		ret = CopyDataServicedHandler(&ivars->dataServicedAction);
		if (kIOReturnSuccess != ret) {
			ivars->dataServicedAction = NULL;
		}
	}
	if (ivars->dataServicedAction) {
		ivars->dataQueue->needServicedCallback = false;
		DataServiced(ivars->dataServicedAction);
	}
}

kern_return_t
IODataQueueDispatchSource::SetEnableWithCompletion_Impl(
	bool enable,
	IODispatchSourceCancelHandler handler)
{
	IOReturn ret;

#if !KERNEL
	ivars->enable = enable;
#endif

	ret = kIOReturnSuccess;
	return ret;
}

void
IODataQueueDispatchSource::free()
{
	OSSafeReleaseNULL(ivars->memory);
	OSSafeReleaseNULL(ivars->dataAvailableAction);
	OSSafeReleaseNULL(ivars->dataServicedAction);
	IOSafeDeleteNULL(ivars, IODataQueueDispatchSource_IVars, 1);
	super::free();
}

kern_return_t
IODataQueueDispatchSource::Cancel_Impl(
	IODispatchSourceCancelHandler handler)
{
	return kIOReturnSuccess;
}

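/* The queue is non-empty whenever the head and tail offsets differ. */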
bool
IODataQueueDispatchSource::IsDataAvailable(void)
{
	IODataQueueMemory *dataQueue = ivars->dataQueue;

	return dataQueue && (dataQueue->head != dataQueue->tail);
}

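/*
 * Peek at the oldest entry without consuming it.  The entry at the current
 * head offset is located (wrapping to the start of the buffer when the record
 * could not have fit before the end), its bounds are validated against the
 * queue size, and the callback is invoked with a pointer to the payload and
 * its caller-visible size.  The head offset is not advanced.
 */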
kern_return_t
IODataQueueDispatchSource::Peek(IODataQueueClientDequeueEntryBlock callback)
{
	IODataQueueEntry * entry = NULL;
	IODataQueueMemory * dataQueue;
	uint32_t callerDataSize;
	uint32_t dataSize;
	uint32_t headOffset;
	uint32_t tailOffset;

	dataQueue = ivars->dataQueue;
	if (!dataQueue) {
		return kIOReturnNoMemory;
	}

	// Read head (relaxed) and tail (acquire); the acquire pairs with the
	// enqueuer's release store to tail.
	headOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		IODataQueueEntry * head = NULL;
		uint32_t headSize = 0;
		uint32_t queueSize = ivars->queueByteCount;

		if (headOffset > queueSize) {
			return kIOReturnError;
		}

		head = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + headOffset);
		callerDataSize = head->size;
		if (os_add_overflow(3, callerDataSize, &headSize)) {
			return kIOReturnError;
		}
		headSize &= ~3U;

		// Check if there's enough room before the end of the queue for a header.
		// If there is room, check if there's enough room to hold the header and
		// the data.

		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// No room for the header or the data, wrap to the beginning of the queue.
			// Note: wrapping even with the UINT32_MAX checks, as we have to support
			// a queueSize of UINT32_MAX.
			entry = dataQueue->queue;
			callerDataSize = entry->size;
			dataSize = entry->size;
			if (os_add_overflow(3, callerDataSize, &dataSize)) {
				return kIOReturnError;
			}
			dataSize &= ~3U;

			if ((dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return kIOReturnError;
			}

			callback(&entry->data, callerDataSize);
			return kIOReturnSuccess;
		} else {
			callback(&head->data, callerDataSize);
			return kIOReturnSuccess;
		}
	}

	return kIOReturnUnderrun;
}

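/* Consume one entry, then send DataServiced if the enqueuer asked to be told
 * when space is freed. */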
kern_return_t
IODataQueueDispatchSource::Dequeue(IODataQueueClientDequeueEntryBlock callback)
{
	kern_return_t ret;
	bool sendDataServiced;

	sendDataServiced = false;
	ret = DequeueWithCoalesce(&sendDataServiced, callback);
	if (sendDataServiced) {
		SendDataServiced();
	}
	return ret;
}

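/*
 * Remove the entry at the head of the queue.  The head record is validated
 * (wrapping to the start of the buffer when it could not have fit before the
 * end), the callback is handed the payload, and the new head offset is
 * published with a release store.  If the dequeue empties the queue, a
 * sequentially consistent fence pairs with the one in EnqueueWithCoalesce so
 * that either the enqueuer sees the empty queue or this side sees the racing
 * enqueue.
 */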
kern_return_t
IODataQueueDispatchSource::DequeueWithCoalesce(bool * sendDataServiced,
    IODataQueueClientDequeueEntryBlock callback)
{
	IOReturn retVal = kIOReturnSuccess;
	IODataQueueEntry * entry = NULL;
	IODataQueueMemory * dataQueue;
	uint32_t callerDataSize;
	uint32_t dataSize = 0;
	uint32_t headOffset = 0;
	uint32_t tailOffset = 0;
	uint32_t newHeadOffset = 0;

	dataQueue = ivars->dataQueue;
	if (!dataQueue) {
		return kIOReturnNoMemory;
	}

	// Read head (relaxed) and tail (acquire); the acquire pairs with the
	// enqueuer's release store to tail.
	headOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		IODataQueueEntry * head = NULL;
		uint32_t headSize = 0;
		uint32_t queueSize = ivars->queueByteCount;

		if (headOffset > queueSize) {
			return kIOReturnError;
		}

		head = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + headOffset);
		callerDataSize = head->size;
		if (os_add_overflow(3, callerDataSize, &headSize)) {
			return kIOReturnError;
		}
		headSize &= ~3U;

		// Wrap around to the beginning and read from there when either
		// there was not even room for the header
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // or there was room for the header, but not for the data
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// Note: we have to wrap to the beginning even with the UINT32_MAX checks
			// because we have to support a queueSize of UINT32_MAX.
			entry = dataQueue->queue;
			callerDataSize = entry->size;

			if (os_add_overflow(callerDataSize, 3, &dataSize)) {
				return kIOReturnError;
			}
			dataSize &= ~3U;
			if ((dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return kIOReturnError;
			}
			newHeadOffset = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
			// else the entry is read in place, at the current head offset
		} else {
			entry = head;

			if ((headSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (headSize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return kIOReturnError;
			}
			newHeadOffset = headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// empty queue
		if (dataQueue->needServicedCallback) {
			*sendDataServiced = true;
		}
		return kIOReturnUnderrun;
	}

	callback(&entry->data, callerDataSize);
	if (dataQueue->needServicedCallback) {
		*sendDataServiced = true;
	}

	__c11_atomic_store((_Atomic uint32_t *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}

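/* Append one entry, then notify the consumer when it needs to be woken:
 * either the queue was observed empty at enqueue time, or the enqueue
 * overran a full queue. */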
kern_return_t
IODataQueueDispatchSource::Enqueue(uint32_t callerDataSize,
    IODataQueueClientEnqueueEntryBlock callback)
{
	kern_return_t ret;
	bool sendDataAvailable;

	sendDataAvailable = false;
	ret = EnqueueWithCoalesce(callerDataSize, &sendDataAvailable, callback);
	if (sendDataAvailable) {
		SendDataAvailable();
	}
	return ret;
}

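/*
 * Append an entry of callerDataSize bytes.  The payload length is rounded up
 * to a 4-byte multiple and the record is placed at the tail, wrapping to the
 * start of the buffer when it does not fit before the end (leaving a size
 * marker at the old tail when there is room for one, so the dequeuer knows to
 * wrap too).  The new tail is published with a release store; on a full queue
 * the enqueue fails with kIOReturnOverrun and needServicedCallback is set so
 * the next dequeue generates a DataServiced notification.
 */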
kern_return_t
IODataQueueDispatchSource::EnqueueWithCoalesce(uint32_t callerDataSize,
    bool * sendDataAvailable,
    IODataQueueClientEnqueueEntryBlock callback)
{
	IODataQueueMemory * dataQueue;
	IODataQueueEntry * entry;
	uint32_t head;
	uint32_t tail;
	uint32_t newTail;
	uint32_t dataSize;
	uint32_t queueSize;
	uint32_t entrySize;
	IOReturn retVal = kIOReturnSuccess;

	dataQueue = ivars->dataQueue;
	if (!dataQueue) {
		return kIOReturnNoMemory;
	}
	queueSize = ivars->queueByteCount;

	// Read tail (relaxed) and head (acquire); the acquire pairs with the
	// dequeuer's release store to head.
	tail = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_ACQUIRE);

	if (os_add_overflow(callerDataSize, 3, &dataSize)) {
		return kIOReturnOverrun;
	}
	dataSize &= ~3U;

	// Check for overflow of entrySize
	if (os_add_overflow(DATA_QUEUE_ENTRY_HEADER_SIZE, dataSize, &entrySize)) {
		return kIOReturnOverrun;
	}

	// Check for underflow of (getQueueSize() - tail)
	if (queueSize < tail || queueSize < head) {
		return kIOReturnUnderrun;
	}

	newTail = tail;
	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= (UINT32_MAX - tail)) &&
		    ((tail + entrySize) <= queueSize)) {
			entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail);

			callback(&entry->data, callerDataSize);

			entry->size = callerDataSize;

			// The tail can be out of bounds when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue);

			callback(&entry->data, callerDataSize);

			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			entry->size = callerDataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail))->size = dataSize;
			}

			newTail = entrySize;
		} else {
			retVal = kIOReturnOverrun; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail);

			callback(&entry->data, callerDataSize);

			entry->size = callerDataSize;

			newTail = tail + entrySize;
		} else {
			retVal = kIOReturnOverrun; // queue is full
		}
	}

	// Send notification (via mach message) that data is available.

	if (retVal == kIOReturnSuccess) {
		// Publish the data we just enqueued
		__c11_atomic_store((_Atomic uint32_t *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

		if (tail != head) {
			//
			// The memory barrier below pairs with the one in dequeue
			// so that either our store to the tail cannot be missed by
			// the next dequeue attempt, or we will observe the dequeuer
			// making the queue empty.
			//
			// Of course, if we already think the queue is empty,
			// there's no point paying this extra cost.
			//
			__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
			head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
		}

		if (tail == head) {
			// Send notification that data is now available.
			*sendDataAvailable = true;
			retVal = kIOReturnSuccess;
		}
	} else if (retVal == kIOReturnOverrun) {
		// Ask to be notified of the next Dequeue().
		dataQueue->needServicedCallback = true;
		*sendDataAvailable = true;
	}

	return retVal;
}
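
/*
 * Illustrative usage sketch (not part of this file).  A minimal kernel-side
 * producer might look like the following, assuming a DriverKit-style service
 * with an existing dispatch queue; the names `fQueue`, `fSource` and the
 * payload size are hypothetical placeholders:
 *
 *	IODataQueueDispatchSource * fSource = NULL;
 *	kern_return_t ret = IODataQueueDispatchSource::Create(16 * 4096, fQueue, &fSource);
 *	if (kIOReturnSuccess == ret) {
 *		ret = fSource->Enqueue(1024, ^(void * data, size_t dataSize) {
 *			// Fill `data` with up to `dataSize` bytes of payload.
 *			bzero(data, dataSize);
 *		});
 *	}
 *
 * The consumer registers a DataAvailable handler (SetDataAvailableHandler)
 * and drains entries with Dequeue(); DataServiced is only requested when an
 * enqueue overruns a full queue.
 */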