/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueue.h>

#undef DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/OSAtomic.h>

struct IODataQueueInternal
{
    mach_msg_header_t msg;
    UInt32            queueSize;
};
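
// Note on layout: IODataQueueInternal holds only kernel-private state. The
// shared region pointed to by the inherited dataQueue member is laid out per
// IODataQueueShared.h as an IODataQueueMemory header (queueSize, head, tail)
// followed by the circular buffer of variable-length IODataQueueEntry
// records. The queueSize copy kept here, rather than the one in the shared
// (user-mappable) header, is what enqueue() trusts for its bounds checks.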

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super OSObject

OSDefineMetaClassAndStructors(IODataQueue, OSObject)

IODataQueue *IODataQueue::withCapacity(UInt32 size)
{
    IODataQueue *dataQueue = new IODataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithCapacity(size)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}

IODataQueue *IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
    IODataQueue *dataQueue = new IODataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithEntries(numEntries, entrySize)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}
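
// A minimal creation/producer sketch (hypothetical driver code; MySample,
// sample, and the 64-entry sizing are illustrative, not part of this class):
//
//     IODataQueue * queue = IODataQueue::withEntries(64, sizeof(MySample));
//     if (queue) {
//         if (!queue->enqueue(&sample, sizeof(sample))) {
//             // Queue full (or size overflow): drop the sample or retry later.
//         }
//     }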

Boolean IODataQueue::initWithCapacity(UInt32 size)
{
    vm_size_t allocSize = 0;

    if (!super::init()) {
        return false;
    }

    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
        return false;
    }

    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);

    if (allocSize < size) {
        return false;
    }

    assert(!notifyMsg);
    notifyMsg = IONew(IODataQueueInternal, 1);
    if (!notifyMsg) {
        return false;
    }
    bzero(notifyMsg, sizeof(IODataQueueInternal));
    ((IODataQueueInternal *)notifyMsg)->queueSize = size;

    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
    if (dataQueue == 0) {
        return false;
    }
    bzero(dataQueue, allocSize);

    dataQueue->queueSize = size;
//  dataQueue->head = 0;    // head and tail are already zeroed by the bzero above
//  dataQueue->tail = 0;

    return true;
}

Boolean IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
    // Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
    //  check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
    if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
        //  check (numEntries + 1)
        (numEntries > UINT32_MAX-1) ||
        //  check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
        (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX/(numEntries+1))) {
        return false;
    }

    return (initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize)));
}
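
// Worked example of the sizing above, assuming the 4-byte entry header
// (the UInt32 size field) defined in IODataQueueShared.h: numEntries = 4
// and entrySize = 16 requests initWithCapacity((4 + 1) * (4 + 16)) = 100
// bytes. One spare entry's worth of space is included because enqueue()
// never lets the tail catch up to the head; without it, the queue could
// hold only numEntries - 1 entries before reporting full.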

void IODataQueue::free()
{
    if (notifyMsg) {
        if (dataQueue) {
            IOFreeAligned(dataQueue, round_page(((IODataQueueInternal *)notifyMsg)->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE));
            dataQueue = NULL;
        }

        IODelete(notifyMsg, IODataQueueInternal, 1);
        notifyMsg = NULL;
    }

    super::free();

    return;
}

Boolean IODataQueue::enqueue(void * data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    UInt32             queueSize;
    IODataQueueEntry * entry;

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }

    // Force a single read of head and tail
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Check for underflow of (dataQueue->queueSize - tail)
    queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
    if ((queueSize < tail) || (queueSize < head)) {
        return false;
    }

    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= queueSize) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.

            newTail = tail + entrySize;
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ( ( queueSize - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Publish the data we just enqueued
    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

    if (tail != head) {
        //
        // The memory barrier below pairs with the one in ::dequeue
        // so that either our store to the tail cannot be missed by
        // the next dequeue attempt, or we will observe the dequeuer
        // making the queue empty.
        //
        // Of course, if we already think the queue is empty,
        // there's no point paying this extra cost.
        //
        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
        head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    }

    if (tail == head) {
        // Send notification (via mach message) that data is now available.
        sendDataAvailableNotification();
    }
    return true;
}
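
// enqueue() deliberately takes no lock: the atomics above make one kernel
// producer safe against the single user-space consumer, but nothing here
// serializes multiple producers. A sketch of the expected call pattern,
// assuming a hypothetical driver-owned IOLock (fQueueLock, fQueue, and
// event are illustrative):
//
//     IOLockLock(fQueueLock);                 // serialize producers
//     bool queued = fQueue->enqueue(&event, sizeof(event));
//     IOLockUnlock(fQueueLock);
//     if (!queued) {
//         // Consumer is behind; decide whether to drop or retry.
//     }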

void IODataQueue::setNotificationPort(mach_port_t port)
{
    mach_msg_header_t * msgh;

    msgh = &((IODataQueueInternal *) notifyMsg)->msg;
    bzero(msgh, sizeof(mach_msg_header_t));
    msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
    msgh->msgh_size = sizeof(mach_msg_header_t);
    msgh->msgh_remote_port = port;
}

void IODataQueue::sendDataAvailableNotification()
{
    kern_return_t       kr;
    mach_msg_header_t * msgh;

    msgh = &((IODataQueueInternal *) notifyMsg)->msg;
    if (msgh->msgh_remote_port) {
        kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
        switch(kr) {
            case MACH_SEND_TIMED_OUT:    // Notification already sent
            case MACH_MSG_SUCCESS:
            case MACH_SEND_NO_BUFFER:
                break;
            default:
                IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/"IODataQueue", kr);
                break;
        }
    }
}
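
// The notification is an empty mach message sent to the port registered via
// setNotificationPort(); MACH_SEND_TIMEOUT with MACH_MSG_TIMEOUT_NONE makes
// the send non-blocking, so a still-pending prior notification simply times
// out (hence MACH_SEND_TIMED_OUT is not treated as an error above). A
// user-space consumer sketch using the IODataQueueClient API (queue mapping
// and port setup omitted; buffer is illustrative):
//
//     while (IODataQueueWaitForAvailableData(queue, port) == kIOReturnSuccess) {
//         while (IODataQueueDataAvailable(queue)) {
//             uint32_t size = sizeof(buffer);
//             IODataQueueDequeue(queue, buffer, &size);
//         }
//     }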

IOMemoryDescriptor *IODataQueue::getMemoryDescriptor()
{
    IOMemoryDescriptor *descriptor = 0;
    UInt32              queueSize;

    queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
    if (dataQueue != 0) {
        descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn);
    }

    return descriptor;
}
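
// getMemoryDescriptor() creates a fresh descriptor on every call; a user
// client typically returns it from clientMemoryForType() so the shared
// queue can be mapped into the calling task. A sketch, assuming a
// hypothetical IOUserClient subclass holding the queue in fDataQueue
// (MyUserClient and kMyQueueMemoryType are illustrative):
//
//     IOReturn
//     MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits * options,
//                                       IOMemoryDescriptor ** memory)
//     {
//         if ((type != kMyQueueMemoryType) || !fDataQueue) {
//             return kIOReturnBadArgument;
//         }
//         *memory = fDataQueue->getMemoryDescriptor();
//         return *memory ? kIOReturnSuccess : kIOReturnNoMemory;
//     }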