X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d26ffc64f583ab2d29df48f13518685602bc8832..e8c3f78193f1895ea514044358b93b1add9322f3:/iokit/Kernel/IOSharedDataQueue.cpp?ds=sidebyside diff --git a/iokit/Kernel/IOSharedDataQueue.cpp b/iokit/Kernel/IOSharedDataQueue.cpp index 17656644a..385393f65 100644 --- a/iokit/Kernel/IOSharedDataQueue.cpp +++ b/iokit/Kernel/IOSharedDataQueue.cpp @@ -285,18 +285,28 @@ Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize) } } - // Update tail with release barrier - __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE); - - // Send notification (via mach message) that data is available. - - if ( ( tail == head ) /* queue was empty prior to enqueue() */ - || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE) ) ) /* queue was emptied during enqueue() */ - { - sendDataAvailableNotification(); - } - - return true; + // Publish the data we just enqueued + __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE); + + if (tail != head) { + // + // The memory barrier below pairs with the one in ::dequeue + // so that either our store to the tail cannot be missed by + // the next dequeue attempt, or we will observe the dequeuer + // making the queue empty. + // + // Of course, if we already think the queue is empty, + // there's no point paying this extra cost. + // + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); + head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); + } + + if (tail == head) { + // Send notification (via mach message) that data is now available. 
+ sendDataAvailableNotification(); + } + return true; } Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize) @@ -308,7 +318,7 @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize) UInt32 tailOffset = 0; UInt32 newHeadOffset = 0; - if (!dataQueue) { + if (!dataQueue || (data && !dataSize)) { return false; } @@ -356,30 +366,30 @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize) } newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE; } - } - - if (entry) { - if (data) { - if (dataSize) { - if (entrySize <= *dataSize) { - memcpy(data, &(entry->data), entrySize); - __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE); - } else { - retVal = FALSE; - } - } else { - retVal = FALSE; - } - } else { - __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE); - } - - if (dataSize) { - *dataSize = entrySize; - } - } else { - retVal = FALSE; - } + } else { + // empty queue + return false; + } + + if (data) { + if (entrySize > *dataSize) { + // not enough space + return false; + } + memcpy(data, &(entry->data), entrySize); + *dataSize = entrySize; + } + + __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE); + + if (newHeadOffset == tailOffset) { + // + // If we are making the queue empty, then we need to make sure + // that either the enqueuer notices, or we notice the enqueue + // that raced with our making of the queue empty. + // + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); + } return retVal; }