/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)
IOSharedDataQueue *IOSharedDataQueue::withCapacity(UInt32 size)
{
    IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithCapacity(size)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}
IOSharedDataQueue *IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
    IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithEntries(numEntries, entrySize)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}
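/*
 * Illustrative usage (a sketch, not part of this file's build): creating a
 * queue with one of the factories above from driver code. The 16 KB capacity,
 * clientPort, and the event struct are assumptions for illustration only.
 *
 *     IOSharedDataQueue *queue = IOSharedDataQueue::withCapacity(16 * 1024);
 *     if (queue) {
 *         queue->setNotificationPort(clientPort); // clientPort: a hypothetical
 *                                                 // mach port from a user client
 *         queue->enqueue(&event, sizeof(event));  // event: a hypothetical POD struct
 *         queue->release();                       // factory result follows the usual
 *                                                 // OSObject retain/release rules
 *     }
 */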
Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
{
    IODataQueueAppendix *appendix;
    vm_size_t            allocSize;

    if (!super::init()) {
        return false;
    }

    _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
    if (!_reserved) {
        return false;
    }

    // Check for overflow of (size + header + appendix).
    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
        return false;
    }

    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

    if (allocSize < size) {
        return false;
    }

    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
    if (dataQueue == 0) {
        return false;
    }
    bzero(dataQueue, allocSize);

    dataQueue->queueSize = size;
//  dataQueue->head      = 0;   // already zeroed by the bzero() above
//  dataQueue->tail      = 0;

    if (!setQueueSize(size)) {
        return false;
    }

    appendix          = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
    appendix->version = 0;

    if (!notifyMsg) {
        notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
        if (!notifyMsg) {
            return false;
        }
    }
    bzero(notifyMsg, sizeof(mach_msg_header_t));

    setNotificationPort(MACH_PORT_NULL);

    return true;
}
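/*
 * Resulting shared-memory layout (a sketch derived from the arithmetic above;
 * the size constants come from IODataQueueShared.h):
 *
 *     +--------------------------+-------------------------+---------------------+
 *     | IODataQueueMemory header | queue area (size bytes) | IODataQueueAppendix |
 *     +--------------------------+-------------------------+---------------------+
 *     |<- DATA_QUEUE_MEMORY_     |                         |<- DATA_QUEUE_MEMORY_|
 *     |   HEADER_SIZE          ->|                         |   APPENDIX_SIZE   ->|
 *
 * round_page() rounds the whole region up to a page boundary, so the
 * page-aligned allocation can later be mapped into a user task.
 */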
void IOSharedDataQueue::free()
{
    if (dataQueue) {
        IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
        dataQueue = NULL;
        if (notifyMsg) {
            IOFree(notifyMsg, sizeof(mach_msg_header_t));
            notifyMsg = NULL;
        }
    }

    if (_reserved) {
        IOFree(_reserved, sizeof(struct ExpansionData));
        _reserved = NULL;
    }

    super::free();
}
IOMemoryDescriptor *IOSharedDataQueue::getMemoryDescriptor()
{
    IOMemoryDescriptor *descriptor = 0;

    if (dataQueue != 0) {
        descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
    }

    return descriptor;
}
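/*
 * Illustrative sketch (not part of this file): how a user client might hand
 * this descriptor to user space. MyUserClient, fQueue, and the memory type
 * value are assumptions for illustration; clientMemoryForType() itself is the
 * standard IOUserClient override.
 *
 *     IOReturn MyUserClient::clientMemoryForType(UInt32 type,
 *                                                IOOptionBits *options,
 *                                                IOMemoryDescriptor **memory)
 *     {
 *         if (type != 0 || !fQueue) {              // fQueue: IOSharedDataQueue *
 *             return kIOReturnBadArgument;
 *         }
 *         *memory = fQueue->getMemoryDescriptor(); // returns a reference that the
 *                                                  // caller consumes
 *         return (*memory != 0) ? kIOReturnSuccess : kIOReturnNoMemory;
 *     }
 */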
IODataQueueEntry *IOSharedDataQueue::peek()
{
    IODataQueueEntry *entry = 0;
    UInt32            headOffset;
    UInt32            tailOffset;

    if (!dataQueue) {
        return NULL;
    }

    // Read head and tail with acquire barrier
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *head       = 0;
        UInt32            headSize   = 0;
        UInt32            headOffset = dataQueue->head;
        UInt32            queueSize  = getQueueSize();

        if (headOffset >= queueSize) {
            return NULL;
        }

        head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize = head->size;

        // Check if there's enough room before the end of the queue for a header.
        // If there is room, check if there's enough room to hold the header and
        // the data.

        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // No room for the header or the data, wrap to the beginning of the queue.
            // Note: wrapping even with the UINT32_MAX checks, as we have to support
            // queueSize of UINT32_MAX
            entry = dataQueue->queue;
        } else {
            entry = head;
        }
    }

    return entry;
}
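/*
 * Illustrative usage (a sketch, not part of this file): peek() lets a
 * kernel-side consumer inspect the head entry without consuming it. The
 * MyEvent payload type is an assumption for illustration.
 *
 *     IODataQueueEntry *e = queue->peek();
 *     if (e && e->size == sizeof(MyEvent)) {
 *         const MyEvent *ev = (const MyEvent *)&e->data;
 *         // ... examine *ev, then call dequeue() to actually consume it ...
 *     }
 */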
Boolean IOSharedDataQueue::enqueue(void *data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry * entry;

    // Force a single read of head and tail
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }
    // Check for underflow of (getQueueSize() - tail)
    if (getQueueSize() < tail || getQueueSize() < head) {
        return false;
    }

    if (tail >= head) {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= getQueueSize())) {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.

            newTail = tail + entrySize;
        } else if (head > entrySize) {  // Is there enough room at the beginning?
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        } else {
            return false;   // queue is full
        }
    } else {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ((head - tail) > entrySize) {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        } else {
            return false;   // queue is full
        }
    }

    // Update tail with release barrier
    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

    // Send notification (via mach message) that data is available.

    if ((tail == head)  /* queue was empty prior to enqueue() */
        || (tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head,
                                      __ATOMIC_RELAXED))) /* queue was emptied during enqueue() */
    {
        sendDataAvailableNotification();
    }

    return true;
}
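/*
 * Memory-ordering sketch (reasoning only, nothing to build): the RELEASE store
 * to tail above pairs with an ACQUIRE load on the consumer side (see the tail
 * load in peek() above), so a consumer that observes the new tail also
 * observes the entry->size and memcpy() writes that preceded it:
 *
 *     producer (enqueue)                   consumer (peek/dequeue)
 *     ------------------                   -----------------------
 *     entry->size = dataSize;
 *     memcpy(&entry->data, ...);
 *     store(tail, __ATOMIC_RELEASE);  -->  load(tail, __ATOMIC_ACQUIRE);
 *                                          // entry contents now visible
 */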
Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
    Boolean             retVal        = TRUE;
    IODataQueueEntry *  entry         = 0;
    UInt32              entrySize     = 0;
    UInt32              headOffset    = 0;
    UInt32              tailOffset    = 0;
    UInt32              newHeadOffset = 0;

    if (!dataQueue || (data && !dataSize)) {
        return false;
    }

    // Read head and tail with acquire barrier
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *head      = 0;
        UInt32            headSize  = 0;
        UInt32            queueSize = getQueueSize();

        if (headOffset > queueSize) {
            return false;
        }

        head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize = head->size;

        // we wrapped around to beginning, so read from there
        // either there was not even room for the header
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            // or there was room for the header, but not for the data
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry     = dataQueue->queue;
            entrySize = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return false;
            }
            newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
            // else it is at the end
        } else {
            entry     = head;
            entrySize = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return false;
            }
            newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
    } else {
        // empty queue
        return false;
    }

    if (data) {
        if (entrySize > *dataSize) {
            // not enough space in the caller's buffer
            return false;
        }
        memcpy(data, &(entry->data), entrySize);
        // Update head with release barrier
        __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
    } else {
        // No output buffer: just advance the head, discarding the entry.
        __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
    }

    if (dataSize) {
        *dataSize = entrySize;
    }

    return retVal;
}
UInt32 IOSharedDataQueue::getQueueSize()
{
    if (!_reserved) {
        return 0;
    }
    return _reserved->queueSize;
}
Boolean IOSharedDataQueue::setQueueSize(UInt32 size)
{
    if (!_reserved) {
        return false;
    }
    _reserved->queueSize = size;
    return true;
}
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);