/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

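// enqueue/dequeue may already be defined as macros in this translation unit
// (kern/queue.h defines both); undefine them so they do not mangle the
// method names below.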
#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

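/*
 * Typical kernel-side use of this class, as a minimal sketch (MyEvent,
 * `event`, `port`, and the 1 KB capacity are hypothetical, not part of
 * this file):
 *
 *	IOSharedDataQueue *queue = IOSharedDataQueue::withCapacity(1024);
 *	if (queue) {
 *		queue->setNotificationPort(port);        // wakes the consumer
 *		queue->enqueue(&event, sizeof(MyEvent)); // producer side
 *	}
 *
 * User space maps the region returned by getMemoryDescriptor() (usually via
 * a user client's clientMemoryForType() and IOConnectMapMemory) and drains
 * it with the IODataQueueClient routines from IOKit.framework.
 */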
IOSharedDataQueue *
IOSharedDataQueue::withCapacity(UInt32 size)
{
	IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			dataQueue->release();
			dataQueue = NULL;
		}
	}

	return dataQueue;
}

IOSharedDataQueue *
IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			dataQueue->release();
			dataQueue = NULL;
		}
	}

	return dataQueue;
}

Boolean
IOSharedDataQueue::initWithCapacity(UInt32 size)
{
	IODataQueueAppendix * appendix;
	vm_size_t             allocSize;

	if (!super::init()) {
		return false;
	}

	_reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
	if (!_reserved) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
		return false;
	}

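	// Shared-region layout: the IODataQueueMemory header, then `size` bytes
	// of entry space, then the appendix, rounded up to whole pages so the
	// region can be mapped into a user task.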
	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

	if (allocSize < size) {
		return false;
	}

	dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
	if (dataQueue == NULL) {
		return false;
	}
	bzero(dataQueue, allocSize);

	dataQueue->queueSize = size;
//	dataQueue->head = 0;
//	dataQueue->tail = 0;

	if (!setQueueSize(size)) {
		return false;
	}

	appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
	appendix->version = 0;

	if (!notifyMsg) {
		notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
		if (!notifyMsg) {
			return false;
		}
	}
	bzero(notifyMsg, sizeof(mach_msg_header_t));

	setNotificationPort(MACH_PORT_NULL);

	return true;
}

void
IOSharedDataQueue::free()
{
	if (dataQueue) {
		IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
		dataQueue = NULL;
		if (notifyMsg) {
			IOFree(notifyMsg, sizeof(mach_msg_header_t));
			notifyMsg = NULL;
		}
	}

	if (_reserved) {
		IOFree(_reserved, sizeof(struct ExpansionData));
		_reserved = NULL;
	}

	super::free();
}

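/*
 * Covers the whole shared region (header, entry space, and appendix) so a
 * user client can return it, typically from a clientMemoryForType()
 * override, for mapping into the consumer task.
 */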
IOMemoryDescriptor *
IOSharedDataQueue::getMemoryDescriptor()
{
	IOMemoryDescriptor *descriptor = NULL;

	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}

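/*
 * Returns a pointer to the next entry without dequeuing it, or NULL when
 * the queue is empty or the shared head offset is out of range. The entry
 * may sit at the head offset or, when the tail wrapped, at the start of
 * the buffer.
 */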
IODataQueueEntry *
IOSharedDataQueue::peek()
{
	IODataQueueEntry *entry = NULL;
	UInt32            headOffset;
	UInt32            tailOffset;

	if (!dataQueue) {
		return NULL;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32 headSize  = 0;
		UInt32 queueSize = getQueueSize();

		if (headOffset >= queueSize) {
			return NULL;
		}

		head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize = head->size;

		// Check if there's enough room before the end of the queue for a header.
		// If there is room, check if there's enough room to hold the header and
		// the data.

		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// No room for the header or the data, wrap to the beginning of the queue.
			// Note: wrapping even with the UINT32_MAX checks, as we have to support
			// queueSize of UINT32_MAX
			entry = dataQueue->queue;
		} else {
			entry = (IODataQueueEntry *)head;
		}
	}

	return entry;
}

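/*
 * Appends one entry on the producer side. The payload is copied into the
 * shared buffer, and only then is the new tail published with a release
 * store, so a consumer that observes the tail also observes the data.
 * An entry needs DATA_QUEUE_ENTRY_HEADER_SIZE bytes of header plus its
 * payload, contiguously at the current tail or at the start of the buffer.
 * For example (hypothetical numbers): with queueSize 1024, head 16, and
 * tail 1000, a 48-byte entry fits neither at the end (1000 + 48 > 1024)
 * nor at the start (head 16 is not > 48), so the enqueue returns false:
 * the queue is full.
 */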
Boolean
IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	IODataQueueEntry * entry;

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}
	// Check for underflow of (getQueueSize() - tail)
	if (getQueueSize() < tail || getQueueSize() < head) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= getQueueSize())) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false;   // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false;   // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}

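/*
 * Removes the entry at the head on the consumer side. When `data` is
 * non-NULL the payload is copied out (failing if *dataSize is too small)
 * and *dataSize is updated; passing `data` without `dataSize` is rejected.
 * The new head is published with a release store, and a full fence closes
 * the race with a concurrent enqueue when this dequeue empties the queue.
 */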
Boolean
IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
	Boolean                     retVal = TRUE;
	volatile IODataQueueEntry * entry = NULL;
	UInt32                      entrySize = 0;
	UInt32                      headOffset = 0;
	UInt32                      tailOffset = 0;
	UInt32                      newHeadOffset = 0;

	if (!dataQueue || (data && !dataSize)) {
		return false;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32 headSize  = 0;
		UInt32 queueSize = getQueueSize();

		if (headOffset > queueSize) {
			return false;
		}

		head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize = head->size;

		// Check whether the entry wrapped to the beginning of the queue:
		// either there was not even room for the header
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // or there was room for the header, but not for the data
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// If so, read from the beginning.
			// Note: we have to wrap to the beginning even with the UINT32_MAX checks
			// because we have to support a queueSize of UINT32_MAX.
			entry = dataQueue->queue;
			entrySize = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return false;
			}
			newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		} else {
			// Otherwise the entry is at the current head offset.
			entry = head;
			entrySize = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return false;
			}
			newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// empty queue
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// not enough space
			return false;
		}
		__nochk_memcpy(data, (void *)entry->data, entrySize);
		*dataSize = entrySize;
	}

	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}

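/*
 * The trusted copy of the queue size lives in the kernel-private
 * ExpansionData rather than in dataQueue->queueSize: the shared header is
 * mapped into the consumer task and can be rewritten from user space, so
 * the bounds checks above must not depend on it.
 */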
UInt32
IOSharedDataQueue::getQueueSize()
{
	if (!_reserved) {
		return 0;
	}
	return _reserved->queueSize;
}

Boolean
IOSharedDataQueue::setQueueSize(UInt32 size)
{
	if (!_reserved) {
		return false;
	}
	_reserved->queueSize = size;
	return true;
}

OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);