        }
    }
-    // Update tail with release barrier
-    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);
-
-    // Send notification (via mach message) that data is available.
-
-    if ( ( tail == head )                                                  /* queue was empty prior to enqueue() */
-      || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE) ) ) /* queue was emptied during enqueue() */
-    {
-        sendDataAvailableNotification();
-    }
-
-    return true;
+    // Publish the data we just enqueued
+    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);
+
+    if (tail != head) {
+        //
+        // The memory barrier below pairs with the one in ::dequeue
+        // so that either our store to the tail cannot be missed by
+        // the next dequeue attempt, or we will observe the dequeuer
+        // making the queue empty.
+        //
+        // Of course, if we already think the queue is empty,
+        // there's no point paying this extra cost.
+        //
+        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
+        head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
+    }
+
+    if (tail == head) {
+        // Send notification (via mach message) that data is now available.
+        sendDataAvailableNotification();
+    }
+    return true;
}
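The comment in the new enqueue path is describing the classic store-buffering (Dekker) pattern: each thread stores to one shared variable and then loads the other, and only a sequentially consistent fence between the store and the load rules out both threads reading stale values. Here is a minimal user-space sketch of that guarantee, assuming standard <stdatomic.h> and pthreads in place of the kernel's __c11_atomic_* builtins; all names are illustrative, not part of IOSharedDataQueue:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint g_tail; /* stands in for dataQueue->tail */
static atomic_uint g_head; /* stands in for dataQueue->head */
static unsigned r_enq, r_deq;

static void *enqueuer(void *arg)
{
    (void)arg;
    /* publish the new tail ... */
    atomic_store_explicit(&g_tail, 1, memory_order_release);
    /* ... fence, then re-check whether the dequeuer emptied the queue */
    atomic_thread_fence(memory_order_seq_cst);
    r_enq = atomic_load_explicit(&g_head, memory_order_relaxed);
    return NULL;
}

static void *dequeuer(void *arg)
{
    (void)arg;
    /* make the queue empty ... */
    atomic_store_explicit(&g_head, 1, memory_order_release);
    /* ... fence, then take the "next look" at the tail */
    atomic_thread_fence(memory_order_seq_cst);
    r_deq = atomic_load_explicit(&g_tail, memory_order_relaxed);
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, enqueuer, NULL);
    pthread_create(&b, NULL, dequeuer, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    /*
     * With both seq_cst fences, r_enq == 0 && r_deq == 0 is forbidden:
     * at least one thread must observe the other's store. Weaken either
     * fence to release/acquire and both loads may return 0 -- the
     * enqueuer skips the notification while the dequeuer still believes
     * the queue is empty.
     */
    printf("enqueuer saw head=%u, dequeuer saw tail=%u\n", r_enq, r_deq);
    return 0;
}

A release store alone only orders the writes that precede it; it does nothing to keep the store itself from being reordered after the following load, which is why the patch needs full fences here.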
@@ ... @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
    UInt32 tailOffset = 0;
    UInt32 newHeadOffset = 0;

-    if (!dataQueue) {
+    if (!dataQueue || (data && !dataSize)) {
        return false;
    }
@@ ... @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
            }
            newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
-    }
-
-    if (entry) {
-        if (data) {
-            if (dataSize) {
-                if (entrySize <= *dataSize) {
-                    memcpy(data, &(entry->data), entrySize);
-                    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
-                } else {
-                    retVal = FALSE;
-                }
-            } else {
-                retVal = FALSE;
-            }
-        } else {
-            __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
-        }
-
-        if (dataSize) {
-            *dataSize = entrySize;
-        }
-    } else {
-        retVal = FALSE;
-    }
+    } else {
+        // empty queue
+        return false;
+    }
+
+    if (data) {
+        if (entrySize > *dataSize) {
+            // not enough space
+            return false;
+        }
+        memcpy(data, &(entry->data), entrySize);
+        *dataSize = entrySize;
+    }
+
+    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
+
+    if (newHeadOffset == tailOffset) {
+        //
+        // If we are making the queue empty, then we need to make sure
+        // that either the enqueuer notices, or we notice the enqueue
+        // that raced with our making of the queue empty.
+        //
+        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
+    }
    return retVal;
}
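Taken together, the two fences buy a no-lost-wakeup invariant: whenever the consumer is about to sleep on an empty queue, a racing enqueue is guaranteed either to be seen by the consumer's next tail check or to trigger the notification. The sketch below models the whole handshake in user space, with a POSIX semaphore standing in for the mach notification; the names, the single-counter "queue", and the omitted wraparound and payload handling are simplifications, not the real IOSharedDataQueue layout:

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITEMS 100000u

static atomic_uint tail_;        /* only written by the producer */
static atomic_uint head_;        /* only written by the consumer */
static sem_t data_available;     /* stands in for the mach notification */

static void *producer(void *arg)
{
    (void)arg;
    for (unsigned i = 0; i < ITEMS; i++) {
        unsigned tail = atomic_load_explicit(&tail_, memory_order_relaxed);
        unsigned head = atomic_load_explicit(&head_, memory_order_relaxed);
        /* the entry payload would be written here; then publish it */
        atomic_store_explicit(&tail_, tail + 1, memory_order_release);
        if (tail != head) {
            /* pairs with the consumer's fence: either it sees our new
             * tail, or we see it making the queue empty */
            atomic_thread_fence(memory_order_seq_cst);
            head = atomic_load_explicit(&head_, memory_order_relaxed);
        }
        if (tail == head) {
            sem_post(&data_available); /* queue was, or just became, empty */
        }
    }
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    for (unsigned done = 0; done < ITEMS; ) {
        unsigned head = atomic_load_explicit(&head_, memory_order_relaxed);
        unsigned tail = atomic_load_explicit(&tail_, memory_order_acquire);
        if (head == tail) {
            sem_wait(&data_available); /* sleep until notified, then re-check */
            continue;
        }
        /* the entry payload would be read here; then retire it */
        atomic_store_explicit(&head_, head + 1, memory_order_release);
        if (head + 1 == tail) {
            /* we just made the queue empty: ensure the producer notices,
             * or our next tail load sees its racing enqueue */
            atomic_thread_fence(memory_order_seq_cst);
        }
        done++;
    }
    return NULL;
}

int main(void)
{
    pthread_t p, c;
    sem_init(&data_available, 0, 0);
    pthread_create(&p, NULL, producer, NULL);
    pthread_create(&c, NULL, consumer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    printf("consumed %u items with no lost wakeup\n", ITEMS);
    return 0;
}

Note that the model makes the same cost-saving choices as the patch: the producer only fences when the queue looked non-empty at the time of the enqueue, and the consumer only fences when it has just made the queue empty, since those are the only cases where the store-load race can swallow a notification.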