/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
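
/*
 * IODataQueue implements the kernel (producer) side of a data queue whose
 * backing store is shared with a user-space consumer.  The kernel enqueues
 * variable-sized entries and advances the tail; the consumer, using the
 * IODataQueueClient routines, dequeues entries and advances the head.  A
 * Mach message is sent to a client-supplied port when data is enqueued onto
 * a previously empty queue.
 *
 * IODataQueue is deprecated in favor of IOSharedDataQueue; defining
 * DISABLE_DATAQUEUE_WARNING before including IODataQueue.h suppresses the
 * deprecation warning within this implementation itself.
 */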

#define DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueue.h>

#undef DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/OSAtomic.h>

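/*
 * Kernel-private state for a queue instance.  'msg' is the preconstructed
 * Mach message sent by sendDataAvailableNotification(), and 'queueSize' is
 * the queue capacity cached here so that enqueue() can bound head and tail
 * without trusting the header of the shared region, which user space can
 * modify.  This structure hangs off the opaque notifyMsg member.
 */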
struct IODataQueueInternal {
	mach_msg_header_t msg;
	UInt32            queueSize;
};

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super OSObject

OSDefineMetaClassAndStructors(IODataQueue, OSObject)

IODataQueue *IODataQueue::withCapacity(UInt32 size)
{
	IODataQueue *dataQueue = new IODataQueue;

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			dataQueue->release();
			dataQueue = NULL;
		}
	}

	return dataQueue;
}

IODataQueue *
IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	IODataQueue *dataQueue = new IODataQueue;

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			dataQueue->release();
			dataQueue = NULL;
		}
	}

	return dataQueue;
}

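/*
 * Allocate the shared queue region: a page-aligned allocation of
 * round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE), zero-filled so that
 * head and tail start at zero.  The requested capacity is also recorded in
 * the kernel-private IODataQueueInternal allocation.
 */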
Boolean
IODataQueue::initWithCapacity(UInt32 size)
{
	vm_size_t allocSize = 0;

	if (!super::init()) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);

	if (allocSize < size) {
		return false;
	}

	assert(!notifyMsg);
	notifyMsg = IONew(IODataQueueInternal, 1);
	if (!notifyMsg) {
		return false;
	}
	bzero(notifyMsg, sizeof(IODataQueueInternal));
	((IODataQueueInternal *)notifyMsg)->queueSize = size;

	dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
	if (dataQueue == NULL) {
		return false;
	}
	bzero(dataQueue, allocSize);

	dataQueue->queueSize = size;
//	dataQueue->head      = 0;
//	dataQueue->tail      = 0;

	return true;
}

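/*
 * Size the queue for numEntries entries of entrySize bytes.  One extra
 * entry's worth of space is allocated because the tail is never allowed to
 * catch up to the head: a queue holding numEntries entries still needs one
 * free slot to distinguish full from empty.
 */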
Boolean
IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
	// Checking overflow for (numEntries + 1) * (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
	// check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
	    // check (numEntries + 1)
	    (numEntries > UINT32_MAX - 1) ||
	    // check (numEntries + 1) * (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX / (numEntries + 1))) {
		return false;
	}

	return initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize));
}

void
IODataQueue::free()
{
	if (notifyMsg) {
		if (dataQueue) {
			IOFreeAligned(dataQueue, round_page(((IODataQueueInternal *)notifyMsg)->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE));
			dataQueue = NULL;
		}

		IODelete(notifyMsg, IODataQueueInternal, 1);
		notifyMsg = NULL;
	}

	super::free();

	return;
}

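/*
 * Append one entry to the queue.  This is the single-producer side: only
 * the kernel writes tail, so it is read relaxed, while head (written by the
 * user-space consumer) is read with acquire semantics.  The entry is placed
 * either in the space between tail and the end of the buffer or, if it does
 * not fit there, wrapped to the start of the buffer.  The new tail is
 * published with a release store, and a data-available notification is sent
 * only when the queue appeared empty, since that is when the consumer may
 * be blocked waiting.
 */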
Boolean
IODataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	UInt32             queueSize;
	IODataQueueEntry * entry;

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for underflow of (queueSize - tail)
	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if ((queueSize < tail) || (queueSize < head)) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= queueSize)) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) {     // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false;                   // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false;                   // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}

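/*
 * Record the port to notify when data becomes available.  The message is a
 * bare header carrying a copied send right; it is built once here and
 * reused by sendDataAvailableNotification().
 */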
void
IODataQueue::setNotificationPort(mach_port_t port)
{
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	bzero(msgh, sizeof(mach_msg_header_t));
	msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msgh->msgh_size = sizeof(mach_msg_header_t);
	msgh->msgh_remote_port = port;
}

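/*
 * Send the (empty) notification message without blocking.  MACH_SEND_TIMEOUT
 * with MACH_MSG_TIMEOUT_NONE makes the send fail immediately rather than
 * wait if the port's message queue is full; a timed-out send means a
 * notification is already pending, so it is not treated as an error.
 */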
void
IODataQueue::sendDataAvailableNotification()
{
	kern_return_t       kr;
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	if (msgh->msgh_remote_port) {
		kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
		switch (kr) {
		case MACH_SEND_TIMED_OUT:    // Notification already sent
		case MACH_MSG_SUCCESS:
		case MACH_SEND_NO_BUFFER:
			break;
		default:
			IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/ "IODataQueue", kr);
			break;
		}
	}
}

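/*
 * Create a memory descriptor covering the queue header and data area so a
 * user client can map the queue into its task.  The direction is
 * kIODirectionOutIn because the consumer must be able to write back the
 * updated head index.
 */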
IOMemoryDescriptor *
IODataQueue::getMemoryDescriptor()
{
	IOMemoryDescriptor *descriptor = NULL;
	UInt32              queueSize;

	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}
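
/*
 * Illustrative usage sketch (not part of this file).  The names fDataQueue
 * and MyEvent are hypothetical; error handling is omitted.
 *
 *     // Kernel driver / user-client side:
 *     fDataQueue = IODataQueue::withEntries(64, sizeof(MyEvent));
 *
 *     // In IOUserClient::registerNotificationPort():
 *     fDataQueue->setNotificationPort(port);
 *
 *     // In IOUserClient::clientMemoryForType():
 *     *memory = fDataQueue->getMemoryDescriptor();
 *
 *     // Producer path:
 *     fDataQueue->enqueue(&event, sizeof(event));
 *
 * The user-space consumer maps the region with IOConnectMapMemory() and
 * drains it with the IODataQueueDequeue()/IODataQueueWaitForAvailableData()
 * routines declared in <IOKit/IODataQueueClient.h>.
 */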