/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

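/*
 * IODataQueue is deprecated in favor of IOSharedDataQueue; defining
 * DISABLE_DATAQUEUE_WARNING around the include below suppresses the
 * deprecation warning that IODataQueue.h would otherwise attach to the
 * class, since this is the implementation file itself.
 */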
#define DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueue.h>

#undef DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/OSAtomic.h>

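/*
 * Private state hung off the opaque notifyMsg pointer declared in
 * IODataQueue.h: the preformatted Mach message used for data-available
 * notifications, plus a cached copy of the queue's capacity in bytes.
 */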
struct IODataQueueInternal {
    mach_msg_header_t msg;
    UInt32            queueSize;
};

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super OSObject

OSDefineMetaClassAndStructors(IODataQueue, OSObject)

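/*
 * Factory methods: allocate a queue and run the matching init method,
 * releasing the half-constructed object on failure so the caller always
 * receives either a fully initialized queue or NULL.
 */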
IODataQueue *
IODataQueue::withCapacity(UInt32 size)
{
    IODataQueue *dataQueue = new IODataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithCapacity(size)) {
            dataQueue->release();
            dataQueue = NULL;
        }
    }

    return dataQueue;
}

IODataQueue *
IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
    IODataQueue *dataQueue = new IODataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithEntries(numEntries, entrySize)) {
            dataQueue->release();
            dataQueue = NULL;
        }
    }

    return dataQueue;
}

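/*
 * The shared region is a header (queue size, head and tail indexes)
 * followed by `size` bytes of entry storage. The allocation is rounded up
 * to a whole page and page-aligned so that getMemoryDescriptor() can later
 * wrap it for mapping into the client task.
 */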
Boolean
IODataQueue::initWithCapacity(UInt32 size)
{
    vm_size_t allocSize = 0;

    if (!super::init()) {
        return false;
    }

    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
        return false;
    }

    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);

    if (allocSize < size) {
        return false;
    }

    assert(!notifyMsg);
    notifyMsg = IONew(IODataQueueInternal, 1);
    if (!notifyMsg) {
        return false;
    }
    bzero(notifyMsg, sizeof(IODataQueueInternal));
    ((IODataQueueInternal *)notifyMsg)->queueSize = size;

    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
    if (dataQueue == NULL) {
        return false;
    }
    bzero(dataQueue, allocSize);

    dataQueue->queueSize = size;
//  dataQueue->head      = 0;
//  dataQueue->tail      = 0;

    return true;
}

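/*
 * Capacity is (numEntries + 1) slots of (entrySize +
 * DATA_QUEUE_ENTRY_HEADER_SIZE) bytes. The extra slot compensates for the
 * fact that the tail is never allowed to catch up to the head, so one
 * entry's worth of space always stays unused.
 */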
Boolean
IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
    // Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
    // check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
    if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
        // check (numEntries + 1)
        (numEntries > UINT32_MAX - 1) ||
        // check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
        (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX / (numEntries + 1))) {
        return false;
    }

    return initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize));
}

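/*
 * Releases both allocations made in initWithCapacity: the page-aligned
 * shared region (recomputing its length with the same round_page()
 * expression) and the internal notification record.
 */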
void
IODataQueue::free()
{
    if (notifyMsg) {
        if (dataQueue) {
            IOFreeAligned(dataQueue, round_page(((IODataQueueInternal *)notifyMsg)->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE));
            dataQueue = NULL;
        }

        IODelete(notifyMsg, IODataQueueInternal, 1);
        notifyMsg = NULL;
    }

    super::free();

    return;
}

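/*
 * Single-producer enqueue into the circular buffer shared with the
 * dequeuer (typically a user client running in another task). Invariants:
 * head == tail means empty; the tail may transiently equal queueSize when
 * an entry exactly fills the end of the buffer; and the tail never catches
 * up to the head, so a full queue fails the enqueue rather than
 * overwriting data. The head is read with acquire semantics and the new
 * tail published with release semantics to order the entry writes against
 * the concurrent dequeuer.
 */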
Boolean
IODataQueue::enqueue(void * data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    UInt32             queueSize;
    IODataQueueEntry * entry;

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }

    // Force a single read of head and tail
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Check for underflow of (dataQueue->queueSize - tail)
    queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
    if ((queueSize < tail) || (queueSize < head)) {
        return false;
    }

    if (tail >= head) {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= queueSize)) {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            __nochk_memcpy(&entry->data, data, dataSize);
            // The tail can be out of bounds when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.

            newTail = tail + entrySize;
        } else if (head > entrySize) { // Is there enough room at the beginning?
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // Also write the size at the old tail, but only if there is
            // room for it there. The user client checks for this case and
            // will look for the size at the beginning of the queue if there
            // was no room for it at the end.

            if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            __nochk_memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        } else {
            return false; // queue is full
        }
    } else {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ((head - tail) > entrySize) {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            __nochk_memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        } else {
            return false; // queue is full
        }
    }

    // Publish the data we just enqueued
    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

    if (tail != head) {
        //
        // The memory barrier below pairs with the one in ::dequeue
        // so that either our store to the tail cannot be missed by
        // the next dequeue attempt, or we will observe the dequeuer
        // making the queue empty.
        //
        // Of course, if we already think the queue is empty,
        // there's no point paying this extra cost.
        //
        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
        head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    }

    if (tail == head) {
        // Send notification (via mach message) that data is now available.
        sendDataAvailableNotification();
    }
    return true;
}

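/*
 * Records the port that enqueue() will notify when the queue transitions
 * from empty to non-empty. Only a bare header is kept; the notification
 * message carries no payload, and MACH_MSG_TYPE_COPY_SEND lets the same
 * header be reused for every send without consuming the stored right.
 */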
void
IODataQueue::setNotificationPort(mach_port_t port)
{
    mach_msg_header_t * msgh;

    msgh = &((IODataQueueInternal *) notifyMsg)->msg;
    bzero(msgh, sizeof(mach_msg_header_t));
    msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
    msgh->msgh_size = sizeof(mach_msg_header_t);
    msgh->msgh_remote_port = port;
}

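/*
 * Fires the data-available notification. MACH_SEND_TIMEOUT with a zero
 * timeout makes the send non-blocking: if the port's message queue is
 * full, a notification is already pending and this one can be dropped,
 * which is why MACH_SEND_TIMED_OUT and MACH_SEND_NO_BUFFER are not
 * treated as errors.
 */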
void
IODataQueue::sendDataAvailableNotification()
{
    kern_return_t       kr;
    mach_msg_header_t * msgh;

    msgh = &((IODataQueueInternal *) notifyMsg)->msg;
    if (msgh->msgh_remote_port) {
        kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
        switch (kr) {
        case MACH_SEND_TIMED_OUT: // Notification already sent
        case MACH_MSG_SUCCESS:
        case MACH_SEND_NO_BUFFER:
            break;
        default:
            IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/ "IODataQueue", kr);
            break;
        }
    }
}

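/*
 * Wraps the shared region (header plus entry storage) in a memory
 * descriptor so a user client can map the queue into its own task and
 * dequeue directly from shared memory.
 */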
IOMemoryDescriptor *
IODataQueue::getMemoryDescriptor()
{
    IOMemoryDescriptor *descriptor = NULL;
    UInt32              queueSize;

    queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
    if (dataQueue != NULL) {
        descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn);
    }

    return descriptor;
}
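
/*
 * A minimal usage sketch from the driver side (hypothetical code: MyEvent,
 * the entry count, and clientPort are illustrative, not part of this file):
 *
 *     IODataQueue *queue = IODataQueue::withEntries(64, sizeof(MyEvent));
 *     if (queue) {
 *         queue->setNotificationPort(clientPort);
 *         MyEvent event = { ... };
 *         if (!queue->enqueue(&event, sizeof(event))) {
 *             // Queue full: the event is dropped, never overwritten.
 *         }
 *     }
 *
 * The consumer would typically map the region returned by
 * getMemoryDescriptor() into its own task and drain entries there, e.g.
 * with the user-space IODataQueueDequeue() routine.
 */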