[apple/xnu.git] iokit/Kernel/IOSharedDataQueue.cpp (xnu-7195.101.1)

/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/c++/OSSharedPtr.h>

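/*
 * Other kernel headers (e.g. kern/queue.h) can define enqueue/dequeue as
 * macros; undo that here so they don't rewrite the member function names below.
 */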
#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			return nullptr;
		}
	}

	return dataQueue;
}

OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			return nullptr;
		}
	}

	return dataQueue;
}

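/*
 * The shared region created below is a single allocation laid out as the
 * IODataQueueMemory header, then `size` bytes of queue data, then the
 * IODataQueueAppendix, rounded up to a whole number of pages.  The mach
 * message header used for notifications lives in a separate kernel-only
 * allocation (notifyMsg), not in the shared region.
 */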
Boolean
IOSharedDataQueue::initWithCapacity(UInt32 size)
{
	IODataQueueAppendix * appendix;
	vm_size_t             allocSize;

	if (!super::init()) {
		return false;
	}

	_reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
	if (!_reserved) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

	if (allocSize < size) {
		return false;
	}

	dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
	if (dataQueue == NULL) {
		return false;
	}
	bzero(dataQueue, allocSize);

	dataQueue->queueSize = size;
//	dataQueue->head = 0;
//	dataQueue->tail = 0;

	if (!setQueueSize(size)) {
		return false;
	}

	appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
	appendix->version = 0;

	if (!notifyMsg) {
		notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
		if (!notifyMsg) {
			return false;
		}
	}
	bzero(notifyMsg, sizeof(mach_msg_header_t));

	setNotificationPort(MACH_PORT_NULL);

	return true;
}

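/*
 * free() has to cope with a partially constructed object: initWithCapacity()
 * can bail out early, so dataQueue, notifyMsg and _reserved are each released
 * only if they were actually allocated.
 */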
void
IOSharedDataQueue::free()
{
	if (dataQueue) {
		IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
		dataQueue = NULL;
		if (notifyMsg) {
			IOFree(notifyMsg, sizeof(mach_msg_header_t));
			notifyMsg = NULL;
		}
	}

	if (_reserved) {
		IOFree(_reserved, sizeof(struct ExpansionData));
		_reserved = NULL;
	}

	super::free();
}

OSSharedPtr<IOMemoryDescriptor>
IOSharedDataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;

	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}

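/*
 * Usage sketch (illustrative only, not part of this file): a driver usually
 * owns the queue and enqueues records, while its IOUserClient subclass hands
 * the backing memory and the notification port to the client task.  The
 * names MyUserClient, kMyQueueMemoryType and _queue below are hypothetical.
 *
 *	// Driver side: create a queue with a 16 KB data area and post records.
 *	_queue = IOSharedDataQueue::withCapacity(16 * 1024);
 *	...
 *	_queue->enqueue(&record, sizeof(record));   // false means the queue was full
 *
 *	// User client side: let the client map the shared region.
 *	IOReturn
 *	MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits *options, IOMemoryDescriptor **memory)
 *	{
 *		if (type == kMyQueueMemoryType) {
 *			*memory = _queue->getMemoryDescriptor().detach();
 *			return kIOReturnSuccess;
 *		}
 *		return kIOReturnBadArgument;
 *	}
 *
 *	// Route the client's port to the queue so enqueue() can post the
 *	// data-available mach message.
 *	IOReturn
 *	MyUserClient::registerNotificationPort(mach_port_t port, UInt32 type, UInt32 refCon)
 *	{
 *		_queue->setNotificationPort(port);
 *		return kIOReturnSuccess;
 *	}
 */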
IODataQueueEntry *
IOSharedDataQueue::peek()
{
	IODataQueueEntry *entry = NULL;
	UInt32 headOffset;
	UInt32 tailOffset;

	if (!dataQueue) {
		return NULL;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32 headSize = 0;
		UInt32 headOffset = dataQueue->head;
		UInt32 queueSize = getQueueSize();

		if (headOffset > queueSize) {
			return NULL;
		}

		head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize = head->size;

		// Check if there's enough room before the end of the queue for a header.
		// If there is room, check if there's enough room to hold the header and
		// the data.

		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// No room for the header or the data, wrap to the beginning of the queue.
			// Note: wrapping even with the UINT32_MAX checks, as we have to support
			// queueSize of UINT32_MAX
			entry = dataQueue->queue;
		} else {
			entry = (IODataQueueEntry *)head;
		}
	}

	return entry;
}

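/*
 * Only the producer moves the tail and only the consumer moves the head;
 * enqueue() below is the producer side and dequeue() the consumer side.
 * The relaxed/acquire loads paired with the release stores rely on that
 * single-producer/single-consumer discipline.
 */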
Boolean
IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	IODataQueueEntry * entry;

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}
	// Check for underflow of (getQueueSize() - tail)
	if (getQueueSize() < tail || getQueueSize() < head) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= getQueueSize())) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}

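/*
 * dequeue() is the kernel-side consumer path (user-space consumers typically
 * use IODataQueueDequeue() against the mapped region instead).  Because the
 * head, tail and entry sizes live in memory the client task can also map,
 * every offset read from the shared region is bounds-checked before use.
 */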
Boolean
IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
	Boolean retVal = TRUE;
	volatile IODataQueueEntry * entry = NULL;
	UInt32 entrySize = 0;
	UInt32 headOffset = 0;
	UInt32 tailOffset = 0;
	UInt32 newHeadOffset = 0;

	if (!dataQueue || (data && !dataSize)) {
		return false;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32 headSize = 0;
		UInt32 queueSize = getQueueSize();

		if (headOffset > queueSize) {
			return false;
		}

		head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize = head->size;

		// we wrapped around to beginning, so read from there
		// either there was not even room for the header
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // or there was room for the header, but not for the data
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// Note: we have to wrap to the beginning even with the UINT32_MAX checks
			// because we have to support a queueSize of UINT32_MAX.
			entry = dataQueue->queue;
			entrySize = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return false;
			}
			newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
			// else it is at the end
		} else {
			entry = head;
			entrySize = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return false;
			}
			newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// empty queue
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// not enough space
			return false;
		}
		__nochk_memcpy(data, (void *)entry->data, entrySize);
		*dataSize = entrySize;
	}

	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}

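/*
 * The queue size used for all bounds checks is the kernel-private copy kept
 * in the ExpansionData, not the queueSize field in the shared header, so a
 * client task rewriting the shared header cannot widen the kernel's checks.
 */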
UInt32
IOSharedDataQueue::getQueueSize()
{
	if (!_reserved) {
		return 0;
	}
	return _reserved->queueSize;
}

Boolean
IOSharedDataQueue::setQueueSize(UInt32 size)
{
	if (!_reserved) {
		return false;
	}
	_reserved->queueSize = size;
	return true;
}

OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);