/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define IOKIT_ENABLE_SHARED_PTR

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/c++/OSSharedPtr.h>

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)
OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue && !dataQueue->initWithCapacity(size)) {
		return nullptr;
	}

	return dataQueue;
}
OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue && !dataQueue->initWithEntries(numEntries, entrySize)) {
		return nullptr;
	}

	return dataQueue;
}
Boolean
IOSharedDataQueue::initWithCapacity(UInt32 size)
{
	IODataQueueAppendix * appendix;
	vm_size_t             allocSize;

	if (!super::init()) {
		return false;
	}

	_reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
	if (!_reserved) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

	if (allocSize < size) {
		return false;
	}

	dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
	if (dataQueue == NULL) {
		return false;
	}
	bzero(dataQueue, allocSize);

	dataQueue->queueSize = size;
//	dataQueue->head      = 0;
//	dataQueue->tail      = 0;

	if (!setQueueSize(size)) {
		return false;
	}

	appendix          = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
	appendix->version = 0;

	if (!notifyMsg) {
		notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
		if (!notifyMsg) {
			return false;
		}
	}
	bzero(notifyMsg, sizeof(mach_msg_header_t));

	setNotificationPort(MACH_PORT_NULL);

	return true;
}
void
IOSharedDataQueue::free()
{
	if (dataQueue) {
		IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
		dataQueue = NULL;
		if (notifyMsg) {
			IOFree(notifyMsg, sizeof(mach_msg_header_t));
			notifyMsg = NULL;
		}
	}

	if (_reserved) {
		IOFree(_reserved, sizeof(struct ExpansionData));
		_reserved = NULL;
	}

	super::free();
}
OSSharedPtr<IOMemoryDescriptor>
IOSharedDataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;

	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}
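
/*
 * Illustrative sketch (hypothetical names): a user client typically exposes
 * this descriptor so a userspace process can map the ring buffer directly.
 * Assuming a user client class `MyUserClient` holding `fDataQueue`
 * (an OSSharedPtr<IOSharedDataQueue>), the override could look roughly like:
 *
 *     IOReturn
 *     MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits *options,
 *         IOMemoryDescriptor **memory)
 *     {
 *         OSSharedPtr<IOMemoryDescriptor> desc = fDataQueue->getMemoryDescriptor();
 *         if (!desc) {
 *             return kIOReturnNoMemory;
 *         }
 *         *memory = desc.detach(); // the caller takes the reference
 *         return kIOReturnSuccess;
 *     }
 */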
IODataQueueEntry *
IOSharedDataQueue::peek()
{
	IODataQueueEntry *entry = NULL;
	UInt32            headOffset;
	UInt32            tailOffset;

	if (!dataQueue) {
		return NULL;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32 headSize   = 0;
		UInt32 headOffset = dataQueue->head;
		UInt32 queueSize  = getQueueSize();

		if (headOffset > queueSize) {
			return NULL;
		}

		head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize = head->size;

		// Check if there's enough room before the end of the queue for a header.
		// If there is room, check if there's enough room to hold the header and
		// the data.
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// No room for the header or the data, wrap to the beginning of the queue.
			// Note: wrapping even with the UINT32_MAX checks, as we have to support
			// queueSize of UINT32_MAX
			entry = dataQueue->queue;
		} else {
			entry = (IODataQueueEntry *)head;
		}
	}

	return entry;
}
Boolean
IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	IODataQueueEntry * entry;

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}

	// Check for underflow of (getQueueSize() - tail)
	if (getQueueSize() < tail || getQueueSize() < head) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= getQueueSize())) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.
			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.
			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.
			if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.
		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}

	return true;
}
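
/*
 * Illustrative producer-side sketch (hypothetical names): a driver that owns
 * the queue usually creates it once, wires up the notification port handed in
 * by its user client, and then calls enqueue() from its data path. Roughly,
 * assuming a member `fQueue`:
 *
 *     fQueue = IOSharedDataQueue::withCapacity(64 * 1024);
 *     if (!fQueue) {
 *         return kIOReturnNoMemory;
 *     }
 *     // Later, when the user client registers a port:
 *     fQueue->setNotificationPort(port);
 *     // On each event:
 *     if (!fQueue->enqueue(&record, sizeof(record))) {
 *         // queue full; the record is dropped
 *     }
 */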
Boolean
IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
	Boolean                     retVal        = TRUE;
	volatile IODataQueueEntry * entry         = NULL;
	UInt32                      entrySize     = 0;
	UInt32                      headOffset    = 0;
	UInt32                      tailOffset    = 0;
	UInt32                      newHeadOffset = 0;

	if (!dataQueue || (data && !dataSize)) {
		return false;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32 headSize  = 0;
		UInt32 queueSize = getQueueSize();

		if (headOffset > queueSize) {
			return false;
		}

		head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize = head->size;

		// we wrapped around to beginning, so read from there
		// either there was not even room for the header
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // or there was room for the header, but not for the data
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// Note: we have to wrap to the beginning even with the UINT32_MAX checks
			// because we have to support a queueSize of UINT32_MAX.
			entry = dataQueue->queue;
			entrySize = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return false;
			}
			newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
			// else it is at the end
		} else {
			entry = head;
			entrySize = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return false;
			}
			newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// The queue is empty.
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// Not enough space in the caller's buffer.
			return false;
		}
		__nochk_memcpy(data, (void *)entry->data, entrySize);
		*dataSize = entrySize;
	}

	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}
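
/*
 * Illustrative consumer-side sketch: userspace typically maps the queue
 * memory through the user client and drains it with the IODataQueueClient
 * routines from <IOKit/IODataQueueClient.h>. Roughly, assuming `queue`
 * points at the mapped IODataQueueMemory and `port` was allocated with
 * IODataQueueAllocateNotificationPort() and registered with the user client:
 *
 *     while (IODataQueueWaitForAvailableData(queue, port) == kIOReturnSuccess) {
 *         char     buf[256];
 *         uint32_t size = sizeof(buf);
 *         while (IODataQueueDequeue(queue, buf, &size) == kIOReturnSuccess) {
 *             // process `size` bytes in buf
 *             size = sizeof(buf);
 *         }
 *     }
 */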
UInt32
IOSharedDataQueue::getQueueSize()
{
	if (!_reserved) {
		return 0;
	}
	return _reserved->queueSize;
}

Boolean
IOSharedDataQueue::setQueueSize(UInt32 size)
{
	if (!_reserved) {
		return false;
	}
	_reserved->queueSize = size;
	return true;
}
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);