/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#define DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueue.h>

#undef DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/OSAtomic.h>
#include <libkern/c++/OSSharedPtr.h>

struct IODataQueueInternal {
	mach_msg_header_t msg;
	UInt32            queueSize;
};

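/*
 * Note: the queue size is duplicated here in kernel-private memory. The
 * shared IODataQueueMemory header carries its own queueSize field, but that
 * region may be mapped writable into user space (see getMemoryDescriptor),
 * so the bounds checks in enqueue() presumably rely on this trusted copy.
 */
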
#define super OSObject

OSDefineMetaClassAndStructors(IODataQueue, OSObject)

OSSharedPtr<IODataQueue>
IODataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IODataQueue> dataQueue = OSMakeShared<IODataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			return nullptr;
		}
	}

	return dataQueue;
}

OSSharedPtr<IODataQueue>
IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IODataQueue> dataQueue = OSMakeShared<IODataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			return nullptr;
		}
	}

	return dataQueue;
}

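/*
 * Usage sketch (assumed driver-side code, not from this file): a provider
 * typically creates a queue sized for fixed-size events and wires it up:
 *
 *     OSSharedPtr<IODataQueue> queue =
 *         IODataQueue::withEntries(64, sizeof(MyEvent));   // MyEvent is hypothetical
 *     if (queue) {
 *         queue->setNotificationPort(clientPort);          // port supplied by the user client
 *         queue->enqueue(&event, sizeof(event));
 *     }
 */
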
Boolean
IODataQueue::initWithCapacity(UInt32 size)
{
	vm_size_t allocSize = 0;

	if (!super::init()) {
		return false;
	}

	// Check for overflow of (size + DATA_QUEUE_MEMORY_HEADER_SIZE)
	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);

	if (allocSize < size) {
		return false;
	}

	assert(!notifyMsg);
	notifyMsg = IONew(IODataQueueInternal, 1);
	if (!notifyMsg) {
		return false;
	}
	bzero(notifyMsg, sizeof(IODataQueueInternal));
	((IODataQueueInternal *)notifyMsg)->queueSize = size;

	dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
	if (dataQueue == NULL) {
		return false;
	}
	bzero(dataQueue, allocSize);

	dataQueue->queueSize = size;
//	dataQueue->head      = 0;
//	dataQueue->tail      = 0;

	return true;
}

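/*
 * Sizing example (illustrative): with 4 KiB pages, a request for
 * size = 10000 gives allocSize = round_page(10000 +
 * DATA_QUEUE_MEMORY_HEADER_SIZE) = 12288, i.e. three pages. The index
 * arithmetic in enqueue() is bounded by the stored queueSize, so the
 * rounding slack is never handed out as entry space.
 */
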
Boolean
IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
	// Checking overflow for (numEntries + 1) * (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
	//  check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
	    //  check (numEntries + 1)
	    (numEntries > UINT32_MAX - 1) ||
	    //  check (numEntries + 1) * (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX / (numEntries + 1))) {
		return false;
	}

	return initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize));
}

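/*
 * Capacity note (illustrative): the "+ 1" entry of slack exists because the
 * ring distinguishes full from empty by never letting the tail catch up to
 * the head (see enqueue below). For example, withEntries(4, 32) asks for
 * (4 + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + 32) bytes so that four 32-byte
 * entries can be outstanding at once.
 */
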
void
IODataQueue::free()
{
	if (notifyMsg) {
		if (dataQueue) {
			IOFreeAligned(dataQueue, round_page(((IODataQueueInternal *)notifyMsg)->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE));
			dataQueue = NULL;
		}

		IODelete(notifyMsg, IODataQueueInternal, 1);
		notifyMsg = NULL;
	}

	super::free();
}

Boolean
IODataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	UInt32             queueSize;
	IODataQueueEntry * entry;

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

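	/*
	 * Ordering note (illustrative): the acquire load of head pairs with the
	 * dequeuer's release store of head, so observing a new head value also
	 * guarantees we observe the entries it consumed as free. The tail is
	 * only ever written by the enqueuer, so a relaxed load suffices.
	 */
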
	// Check for underflow of (dataQueue->queueSize - tail)
	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if ((queueSize < tail) || (queueSize < head)) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= queueSize)) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) {  // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false;   // queue is full
		}
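		/*
		 * Wrap-around sketch (illustrative): after taking the branch above,
		 * the buffer looks like
		 *
		 *     [ new entry | free | unconsumed entries | stale space ]
		 *       0          newTail head                old tail
		 *
		 * and the (possibly partial) header written at the old tail tells
		 * the user-space reader to wrap.
		 */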
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false;   // queue is full
		}
	}

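	/*
	 * Worked example (illustrative): head == 100, tail == 80,
	 * entrySize == 20. Accepting the entry would make newTail == 100 ==
	 * head, which the reader cannot tell apart from an empty queue, hence
	 * the strict '>' above.
	 */
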
	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}

	return true;
}

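/*
 * Notification note (illustrative): enqueue() re-reads head after the fence
 * and notifies only when the queue appears to have gone from empty to
 * non-empty (tail == head at that point means the consumer had drained
 * everything we could see), so an actively draining consumer is not sent
 * one wakeup per entry.
 */
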
void
IODataQueue::setNotificationPort(mach_port_t port)
{
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	bzero(msgh, sizeof(mach_msg_header_t));
	msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msgh->msgh_size = sizeof(mach_msg_header_t);
	msgh->msgh_remote_port = port;
}

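/*
 * Usage sketch (assumed user-client code, not from this file): the port is
 * usually the one user space registers for notifications:
 *
 *     IOReturn
 *     MyUserClient::registerNotificationPort(mach_port_t port, UInt32, UInt32)
 *     {
 *         fQueue->setNotificationPort(port);   // MyUserClient/fQueue are hypothetical
 *         return kIOReturnSuccess;
 *     }
 */
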
void
IODataQueue::sendDataAvailableNotification()
{
	kern_return_t       kr;
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	if (msgh->msgh_remote_port) {
		kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);

		switch (kr) {
		case MACH_SEND_TIMED_OUT:       // Notification already sent
		case MACH_MSG_SUCCESS:
		case MACH_SEND_NO_BUFFER:
			break;
		default:
			IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/ "IODataQueue", kr);
			break;
		}
	}
}

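/*
 * Send-semantics note (illustrative): MACH_SEND_TIMEOUT with
 * MACH_MSG_TIMEOUT_NONE makes the send non-blocking. MACH_SEND_TIMED_OUT
 * therefore just means a prior notification is still sitting in the port's
 * queue, which is harmless: the consumer will wake up for that one.
 */
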
OSSharedPtr<IOMemoryDescriptor>
IODataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;
	UInt32                          queueSize;

	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}
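
/*
 * Usage sketch (assumed, not from this file): user space typically receives
 * this descriptor through IOUserClient::clientMemoryForType(), maps it with
 * IOConnectMapMemory(), and drains entries with the IODataQueueClient
 * routines (e.g. IODataQueueDataAvailable, IODataQueueDequeue) from
 * <IOKit/IODataQueueClient.h>.
 */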