/*
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef _OSATOMIC_H_
#define _OSATOMIC_H_

#include <stddef.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>

#include <Availability.h>
/*! @header
 * These are the preferred versions of the atomic and synchronization operations.
 * Their implementation is customized at boot time for the platform, including
 * late-breaking errata fixes as necessary.  They are thread safe.
 *
 * WARNING: all addresses passed to these functions must be "naturally aligned",
 * i.e. <code>int32_t</code> pointers must be 32-bit aligned (low 2 bits of
 * address are zeroes), and <code>int64_t</code> pointers must be 64-bit aligned
 * (low 3 bits of address are zeroes.)
 *
 * Note that some versions of the atomic functions incorporate memory barriers
 * and some do not.  Barriers strictly order memory access on weakly-ordered
 * architectures such as PPC.  All loads and stores that appear (in sequential
 * program order) before the barrier are guaranteed to complete before any
 * load or store that appears after the barrier.
 *
 * On a uniprocessor system, the barrier operation is typically a no-op.  On a
 * multiprocessor system, the barrier can be quite expensive on some platforms.
 *
 * Most code should use the barrier functions to ensure that memory shared between
 * threads is properly synchronized.  For example, if you want to initialize
 * a shared data structure and then atomically increment a variable to indicate
 * that the initialization is complete, you must use {@link OSAtomicIncrement32Barrier}
 * to ensure that the stores to your data structure complete before the atomic
 * increment.
 *
 * Likewise, the consumer of that data structure must use {@link OSAtomicDecrement32Barrier},
 * in order to ensure that their loads of the structure are not executed before
 * the atomic decrement.  On the other hand, if you are simply incrementing a global
 * counter, then it is safe and potentially faster to use {@link OSAtomicIncrement32}.
 *
 * If you are unsure which version to use, prefer the barrier variants as they are
 * safer.
 *
 * The spinlock and queue operations always incorporate a barrier.
 *
 * For the kernel-space version of this header, see
 * {@link //apple_ref/doc/header/OSAtomic.h OSAtomic.h (Kernel Framework)}
 *
 * @apiuid //apple_ref/doc/header/user_space_OSAtomic.h
 */
79 /*! @group Arithmetic functions
80 All functions in this group return the new value.
83 /*! @abstract Atomically adds two 32-bit values.
85 This function adds the value given by <code>__theAmount</code> to the
86 value in the memory location referenced by <code>__theValue</code>,
87 storing the result back to that memory location atomically.
88 @result Returns the new value.
90 int32_t OSAtomicAdd32( int32_t __theAmount
, volatile int32_t *__theValue
);
93 /*! @abstract Atomically adds two 32-bit values.
95 This function adds the value given by <code>__theAmount</code> to the
96 value in the memory location referenced by <code>__theValue</code>,
97 storing the result back to that memory location atomically.
99 This function is equivalent to {@link OSAtomicAdd32}
100 except that it also introduces a barrier.
101 @result Returns the new value.
103 int32_t OSAtomicAdd32Barrier( int32_t __theAmount
, volatile int32_t *__theValue
);
106 /*! @abstract Atomically increments a 32-bit value.
109 int32_t OSAtomicIncrement32( volatile int32_t *__theValue
)
110 { return OSAtomicAdd32( 1, __theValue
); }
113 /*! @abstract Atomically increments a 32-bit value with a barrier.
115 This function is equivalent to {@link OSAtomicIncrement32}
116 except that it also introduces a barrier.
117 @result Returns the new value.
120 int32_t OSAtomicIncrement32Barrier( volatile int32_t *__theValue
)
121 { return OSAtomicAdd32Barrier( 1, __theValue
); }
123 /*! @abstract Atomically decrements a 32-bit value. */
125 int32_t OSAtomicDecrement32( volatile int32_t *__theValue
)
126 { return OSAtomicAdd32( -1, __theValue
); }
128 /*! @abstract Atomically increments a 32-bit value with a barrier.
130 This function is equivalent to {@link OSAtomicDecrement32}
131 except that it also introduces a barrier.
132 @result Returns the new value.
135 int32_t OSAtomicDecrement32Barrier( volatile int32_t *__theValue
)
136 { return OSAtomicAdd32Barrier( -1, __theValue
); }
139 #if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)
141 /*! @abstract Atomically adds two 64-bit values.
143 This function adds the value given by <code>__theAmount</code> to the
144 value in the memory location referenced by <code>__theValue</code>,
145 storing the result back to that memory location atomically.
147 int64_t OSAtomicAdd64( int64_t __theAmount
, volatile int64_t *__theValue
);
150 /*! @abstract Atomically adds two 64-bit values with a barrier.
152 This function adds the value given by <code>__theAmount</code> to the
153 value in the memory location referenced by <code>__theValue</code>,
154 storing the result back to that memory location atomically.
156 This function is equivalent to {@link OSAtomicAdd64}
157 except that it also introduces a barrier.
158 @result Returns the new value.
160 int64_t OSAtomicAdd64Barrier( int64_t __theAmount
, volatile int64_t *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_4
, __IPHONE_3_2
);
163 /*! @abstract Atomically increments a 64-bit value. */
165 int64_t OSAtomicIncrement64( volatile int64_t *__theValue
)
166 { return OSAtomicAdd64( 1, __theValue
); }
168 /*! @abstract Atomically increments a 64-bit value with a barrier.
170 This function is equivalent to {@link OSAtomicIncrement64}
171 except that it also introduces a barrier.
172 @result Returns the new value.
175 int64_t OSAtomicIncrement64Barrier( volatile int64_t *__theValue
)
176 { return OSAtomicAdd64Barrier( 1, __theValue
); }
179 /*! @abstract Atomically decrements a 64-bit value.
181 This function is equivalent to {@link OSAtomicIncrement64}
182 except that it also introduces a barrier.
183 @result Returns the new value.
186 int64_t OSAtomicDecrement64( volatile int64_t *__theValue
)
187 { return OSAtomicAdd64( -1, __theValue
); }
190 /*! @abstract Atomically decrements a 64-bit value with a barrier.
192 This function is equivalent to {@link OSAtomicDecrement64}
193 except that it also introduces a barrier.
194 @result Returns the new value.
197 int64_t OSAtomicDecrement64Barrier( volatile int64_t *__theValue
)
198 { return OSAtomicAdd64Barrier( -1, __theValue
); }
201 #endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
204 /*! @group Boolean functions (AND, OR, XOR)
206 * @discussion Functions in this group come in four variants for each operation:
207 * with and without barriers, and functions that return the original value or
208 * the result value of the operation.
210 * The "Orig" versions return the original value, (before the operation); the non-Orig
211 * versions return the value after the operation. All are layered on top of
212 * {@link OSAtomicCompareAndSwap32} and similar.
215 /*! @abstract Atomic bitwise OR of two 32-bit values.
217 This function performs the bitwise OR of the value given by <code>__theMask</code>
218 with the value in the memory location referenced by <code>__theValue</code>,
219 storing the result back to that memory location atomically.
220 @result Returns the new value.
222 int32_t OSAtomicOr32( uint32_t __theMask
, volatile uint32_t *__theValue
);
225 /*! @abstract Atomic bitwise OR of two 32-bit values with barrier.
227 This function performs the bitwise OR of the value given by <code>__theMask</code>
228 with the value in the memory location referenced by <code>__theValue</code>,
229 storing the result back to that memory location atomically.
231 This function is equivalent to {@link OSAtomicOr32}
232 except that it also introduces a barrier.
233 @result Returns the new value.
235 int32_t OSAtomicOr32Barrier( uint32_t __theMask
, volatile uint32_t *__theValue
);
238 /*! @abstract Atomic bitwise OR of two 32-bit values returning original.
240 This function performs the bitwise OR of the value given by <code>__theMask</code>
241 with the value in the memory location referenced by <code>__theValue</code>,
242 storing the result back to that memory location atomically.
243 @result Returns the original value referenced by <code>__theValue</code>.
245 int32_t OSAtomicOr32Orig( uint32_t __theMask
, volatile uint32_t *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_3_2
);
248 /*! @abstract Atomic bitwise OR of two 32-bit values returning original with barrier.
250 This function performs the bitwise OR of the value given by <code>__theMask</code>
251 with the value in the memory location referenced by <code>__theValue</code>,
252 storing the result back to that memory location atomically.
254 This function is equivalent to {@link OSAtomicOr32Orig}
255 except that it also introduces a barrier.
256 @result Returns the original value referenced by <code>__theValue</code>.
258 int32_t OSAtomicOr32OrigBarrier( uint32_t __theMask
, volatile uint32_t *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_3_2
);
263 /*! @abstract Atomic bitwise AND of two 32-bit values.
265 This function performs the bitwise AND of the value given by <code>__theMask</code>
266 with the value in the memory location referenced by <code>__theValue</code>,
267 storing the result back to that memory location atomically.
268 @result Returns the new value.
270 int32_t OSAtomicAnd32( uint32_t __theMask
, volatile uint32_t *__theValue
);
273 /*! @abstract Atomic bitwise AND of two 32-bit values with barrier.
275 This function performs the bitwise AND of the value given by <code>__theMask</code>
276 with the value in the memory location referenced by <code>__theValue</code>,
277 storing the result back to that memory location atomically.
279 This function is equivalent to {@link OSAtomicAnd32}
280 except that it also introduces a barrier.
281 @result Returns the new value.
283 int32_t OSAtomicAnd32Barrier( uint32_t __theMask
, volatile uint32_t *__theValue
);
286 /*! @abstract Atomic bitwise AND of two 32-bit values returning original.
288 This function performs the bitwise AND of the value given by <code>__theMask</code>
289 with the value in the memory location referenced by <code>__theValue</code>,
290 storing the result back to that memory location atomically.
291 @result Returns the original value referenced by <code>__theValue</code>.
293 int32_t OSAtomicAnd32Orig( uint32_t __theMask
, volatile uint32_t *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_3_2
);
296 /*! @abstract Atomic bitwise AND of two 32-bit values returning original with barrier.
298 This function performs the bitwise AND of the value given by <code>__theMask</code>
299 with the value in the memory location referenced by <code>__theValue</code>,
300 storing the result back to that memory location atomically.
302 This function is equivalent to {@link OSAtomicAnd32Orig}
303 except that it also introduces a barrier.
304 @result Returns the original value referenced by <code>__theValue</code>.
306 int32_t OSAtomicAnd32OrigBarrier( uint32_t __theMask
, volatile uint32_t *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_3_2
);
311 /*! @abstract Atomic bitwise XOR of two 32-bit values.
313 This function performs the bitwise XOR of the value given by <code>__theMask</code>
314 with the value in the memory location referenced by <code>__theValue</code>,
315 storing the result back to that memory location atomically.
316 @result Returns the new value.
318 int32_t OSAtomicXor32( uint32_t __theMask
, volatile uint32_t *__theValue
);
321 /*! @abstract Atomic bitwise XOR of two 32-bit values with barrier.
323 This function performs the bitwise XOR of the value given by <code>__theMask</code>
324 with the value in the memory location referenced by <code>__theValue</code>,
325 storing the result back to that memory location atomically.
327 This function is equivalent to {@link OSAtomicXor32}
328 except that it also introduces a barrier.
329 @result Returns the new value.
331 int32_t OSAtomicXor32Barrier( uint32_t __theMask
, volatile uint32_t *__theValue
);
334 /*! @abstract Atomic bitwise XOR of two 32-bit values returning original.
336 This function performs the bitwise XOR of the value given by <code>__theMask</code>
337 with the value in the memory location referenced by <code>__theValue</code>,
338 storing the result back to that memory location atomically.
339 @result Returns the original value referenced by <code>__theValue</code>.
341 int32_t OSAtomicXor32Orig( uint32_t __theMask
, volatile uint32_t *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_3_2
);
344 /*! @abstract Atomic bitwise XOR of two 32-bit values returning original with barrier.
346 This function performs the bitwise XOR of the value given by <code>__theMask</code>
347 with the value in the memory location referenced by <code>__theValue</code>,
348 storing the result back to that memory location atomically.
350 This function is equivalent to {@link OSAtomicXor32Orig}
351 except that it also introduces a barrier.
352 @result Returns the original value referenced by <code>__theValue</code>.
354 int32_t OSAtomicXor32OrigBarrier( uint32_t __theMask
, volatile uint32_t *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_3_2
);
357 /*! @group Compare and swap
358 * Functions in this group return true if the swap occured. There are several versions,
359 * depending on data type and on whether or not a barrier is used.
363 /*! @abstract Compare and swap for 32-bit values.
365 This function compares the value in <code>__oldValue</code> to the value
366 in the memory location referenced by <code>__theValue</code>. If the values
367 match, this function stores the value from <code>__newValue</code> into
368 that memory location atomically.
369 @result Returns TRUE on a match, FALSE otherwise.
371 bool OSAtomicCompareAndSwap32( int32_t __oldValue
, int32_t __newValue
, volatile int32_t *__theValue
);
374 /*! @abstract Compare and swap for 32-bit values with barrier.
376 This function compares the value in <code>__oldValue</code> to the value
377 in the memory location referenced by <code>__theValue</code>. If the values
378 match, this function stores the value from <code>__newValue</code> into
379 that memory location atomically.
381 This function is equivalent to {@link OSAtomicCompareAndSwap32}
382 except that it also introduces a barrier.
383 @result Returns TRUE on a match, FALSE otherwise.
385 bool OSAtomicCompareAndSwap32Barrier( int32_t __oldValue
, int32_t __newValue
, volatile int32_t *__theValue
);
388 /*! @abstract Compare and swap pointers.
390 This function compares the pointer stored in <code>__oldValue</code> to the pointer
391 in the memory location referenced by <code>__theValue</code>. If the pointers
392 match, this function stores the pointer from <code>__newValue</code> into
393 that memory location atomically.
394 @result Returns TRUE on a match, FALSE otherwise.
396 bool OSAtomicCompareAndSwapPtr( void *__oldValue
, void *__newValue
, void * volatile *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_2_0
);
399 /*! @abstract Compare and swap pointers with barrier.
401 This function compares the pointer stored in <code>__oldValue</code> to the pointer
402 in the memory location referenced by <code>__theValue</code>. If the pointers
403 match, this function stores the pointer from <code>__newValue</code> into
404 that memory location atomically.
406 This function is equivalent to {@link OSAtomicCompareAndSwapPtr}
407 except that it also introduces a barrier.
408 @result Returns TRUE on a match, FALSE otherwise.
410 bool OSAtomicCompareAndSwapPtrBarrier( void *__oldValue
, void *__newValue
, void * volatile *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_2_0
);
413 /*! @abstract Compare and swap for <code>int</code> values.
415 This function compares the value in <code>__oldValue</code> to the value
416 in the memory location referenced by <code>__theValue</code>. If the values
417 match, this function stores the value from <code>__newValue</code> into
418 that memory location atomically.
420 This function is equivalent to {@link OSAtomicCompareAndSwap32}.
421 @result Returns TRUE on a match, FALSE otherwise.
423 bool OSAtomicCompareAndSwapInt( int __oldValue
, int __newValue
, volatile int *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_2_0
);
426 /*! @abstract Compare and swap for <code>int</code> values.
428 This function compares the value in <code>__oldValue</code> to the value
429 in the memory location referenced by <code>__theValue</code>. If the values
430 match, this function stores the value from <code>__newValue</code> into
431 that memory location atomically.
433 This function is equivalent to {@link OSAtomicCompareAndSwapInt}
434 except that it also introduces a barrier.
436 This function is equivalent to {@link OSAtomicCompareAndSwap32Barrier}.
437 @result Returns TRUE on a match, FALSE otherwise.
439 bool OSAtomicCompareAndSwapIntBarrier( int __oldValue
, int __newValue
, volatile int *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_2_0
);
442 /*! @abstract Compare and swap for <code>long</code> values.
444 This function compares the value in <code>__oldValue</code> to the value
445 in the memory location referenced by <code>__theValue</code>. If the values
446 match, this function stores the value from <code>__newValue</code> into
447 that memory location atomically.
449 This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures,
450 or {@link OSAtomicCompareAndSwap64} on 64-bit architectures.
451 @result Returns TRUE on a match, FALSE otherwise.
453 bool OSAtomicCompareAndSwapLong( long __oldValue
, long __newValue
, volatile long *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_2_0
);
456 /*! @abstract Compare and swap for <code>long</code> values.
458 This function compares the value in <code>__oldValue</code> to the value
459 in the memory location referenced by <code>__theValue</code>. If the values
460 match, this function stores the value from <code>__newValue</code> into
461 that memory location atomically.
463 This function is equivalent to {@link OSAtomicCompareAndSwapLong}
464 except that it also introduces a barrier.
466 This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures,
467 or {@link OSAtomicCompareAndSwap64} on 64-bit architectures.
468 @result Returns TRUE on a match, FALSE otherwise.
470 bool OSAtomicCompareAndSwapLongBarrier( long __oldValue
, long __newValue
, volatile long *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_2_0
);
473 #if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)
475 /*! @abstract Compare and swap for <code>uint64_t</code> values.
477 This function compares the value in <code>__oldValue</code> to the value
478 in the memory location referenced by <code>__theValue</code>. If the values
479 match, this function stores the value from <code>__newValue</code> into
480 that memory location atomically.
481 @result Returns TRUE on a match, FALSE otherwise.
483 bool OSAtomicCompareAndSwap64( int64_t __oldValue
, int64_t __newValue
, volatile int64_t *__theValue
);
486 /*! @abstract Compare and swap for <code>uint64_t</code> values.
488 This function compares the value in <code>__oldValue</code> to the value
489 in the memory location referenced by <code>__theValue</code>. If the values
490 match, this function stores the value from <code>__newValue</code> into
491 that memory location atomically.
493 This function is equivalent to {@link OSAtomicCompareAndSwap64}
494 except that it also introduces a barrier.
495 @result Returns TRUE on a match, FALSE otherwise.
497 bool OSAtomicCompareAndSwap64Barrier( int64_t __oldValue
, int64_t __newValue
, volatile int64_t *__theValue
) __OSX_AVAILABLE_STARTING(__MAC_10_4
, __IPHONE_3_2
);
499 #endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
502 /* Test and set. They return the original value of the bit, and operate on bit (0x80>>(n&7))
503 * in byte ((char*)theAddress + (n>>3)).
505 /*! @abstract Atomic test and set
507 This function tests a bit in the value referenced by <code>__theAddress</code>
508 and if it is not set, sets it. The bit is chosen by the value of <code>__n</code>.
509 The bits are numbered in order beginning with bit 1 as the lowest order bit.
511 For example, if <code>__theAddress</code> points to a 64-bit value,
512 to compare the value of the highest bit, you would specify <code>64</code> for
515 Returns the original value of the bit being tested.
517 bool OSAtomicTestAndSet( uint32_t __n
, volatile void *__theAddress
);
520 /*! @abstract Atomic test and set with barrier
522 This function tests a bit in the value referenced by <code>__theAddress</code>
523 and if it is not set, sets it. The bit is chosen by the value of <code>__n</code>.
524 The bits are numbered in order beginning with bit 1 as the lowest order bit.
526 For example, if <code>__theAddress</code> points to a 64-bit value,
527 to compare the value of the highest bit, you would specify <code>64</code> for
530 This function is equivalent to {@link OSAtomicTestAndSet}
531 except that it also introduces a barrier.
533 Returns the original value of the bit being tested.
536 bool OSAtomicTestAndSetBarrier( uint32_t __n
, volatile void *__theAddress
);
540 /*! @abstract Atomic test and clear
542 This function tests a bit in the value referenced by <code>__theAddress</code>
543 and if it is not cleared, clears it. The bit is chosen by the value of <code>__n</code>.
544 The bits are numbered in order beginning with bit 1 as the lowest order bit.
546 For example, if <code>__theAddress</code> points to a 64-bit value,
547 to compare the value of the highest bit, you would specify <code>64</code> for
550 Returns the original value of the bit being tested.
552 bool OSAtomicTestAndClear( uint32_t __n
, volatile void *__theAddress
);
555 /*! @abstract Atomic test and clear
557 This function tests a bit in the value referenced by <code>__theAddress</code>
558 and if it is not cleared, clears it. The bit is chosen by the value of <code>__n</code>.
559 The bits are numbered in order beginning with bit 1 as the lowest order bit.
561 For example, if <code>__theAddress</code> points to a 64-bit value,
562 to compare the value of the highest bit, you would specify <code>64</code> for
565 This function is equivalent to {@link OSAtomicTestAndSet}
566 except that it also introduces a barrier.
568 Returns the original value of the bit being tested.
570 bool OSAtomicTestAndClearBarrier( uint32_t __n
, volatile void *__theAddress
);
574 * These spinlocks use memory barriers as required to synchronize access to shared
575 * memory protected by the lock.
578 /*! @abstract The default value for an <code>OSSpinLock</code>.
580 The convention is that unlocked is zero, locked is nonzero.
582 #define OS_SPINLOCK_INIT 0
585 /*! @abstract Data type for a spinlock.
587 You should always initialize a spinlock to {@link OS_SPINLOCK_INIT} before
590 typedef int32_t OSSpinLock
;
593 /*! @abstract Locks a spinlock if it would not block
595 Returns <code>false</code> if the lock was already held by another thread,
596 <code>true</code> if it took the lock successfully.
598 bool OSSpinLockTry( volatile OSSpinLock
*__lock
);
601 /*! @abstract Locks a spinlock
603 Although the lock operation spins, it employs various strategies
604 to back off if the lock is held, making it immune to most priority-inversion
607 void OSSpinLockLock( volatile OSSpinLock
*__lock
);
610 /*! @abstract Unlocks a spinlock */
611 void OSSpinLockUnlock( volatile OSSpinLock
*__lock
);
614 /*! @group Lockless atomic enqueue and dequeue
615 * These routines manipulate singly-linked LIFO lists.
618 /*! @abstract The data structure for a queue head.
620 You should always initialize a queue head structure with the
621 initialization vector {@link OS_ATOMIC_QUEUE_INIT} before use.
623 #if defined(__x86_64__)
625 typedef volatile struct {
628 } __attribute__ ((aligned (16))) OSQueueHead
;
632 typedef volatile struct {
639 /*! @abstract The initialization vector for a queue head. */
640 #define OS_ATOMIC_QUEUE_INIT { NULL, 0 }
642 /*! @abstract Enqueue an item onto a list.
644 Memory barriers are incorporated as needed to permit thread-safe access
645 to the queue element.
647 The list on which you want to enqueue the item.
651 The "offset" parameter is the offset (in bytes) of the link field
652 from the beginning of the data structure being queued (<code>__new</code>).
653 The link field should be a pointer type.
654 The <code>__offset</code> value needs to be same for all enqueuing and
655 dequeuing operations on the same queue, even if different structure types
656 are enqueued on that queue. The use of <code>offsetset()</code>, defined in
657 <code>stddef.h</code> is the common way to specify the <code>__offset</code>
660 void OSAtomicEnqueue( OSQueueHead
*__list
, void *__new
, size_t __offset
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_4_0
);
663 /*! @abstract Dequeue an item from a list.
665 Memory barriers are incorporated as needed to permit thread-safe access
666 to the queue element.
668 The list on which you want to enqueue the item.
670 The "offset" parameter is the offset (in bytes) of the link field
671 from the beginning of the data structure being queued (<code>__new</code>).
672 The link field should be a pointer type.
673 The <code>__offset</code> value needs to be same for all enqueuing and
674 dequeuing operations on the same queue, even if different structure types
675 are enqueued on that queue. The use of <code>offsetset()</code>, defined in
676 <code>stddef.h</code> is the common way to specify the <code>__offset</code>
679 Returns the most recently enqueued element, or <code>NULL</code> if the
682 void* OSAtomicDequeue( OSQueueHead
*__list
, size_t __offset
) __OSX_AVAILABLE_STARTING(__MAC_10_5
, __IPHONE_4_0
);
684 #if defined(__x86_64__) || defined(__i386__)
686 /*! @group Lockless atomic fifo enqueue and dequeue
687 * These routines manipulate singly-linked FIFO lists.
690 /*! @abstract The data structure for a fifo queue head.
692 You should always initialize a fifo queue head structure with the
693 initialization vector {@link OS_ATOMIC_FIFO_QUEUE_INIT} before use.
695 #if defined(__x86_64__)
697 typedef volatile struct {
701 } __attribute__ ((aligned (16))) OSFifoQueueHead
;
705 typedef volatile struct {
713 /*! @abstract The initialization vector for a fifo queue head. */
714 #define OS_ATOMIC_FIFO_QUEUE_INIT { NULL, NULL, 0 }
716 /*! @abstract Enqueue an item onto a list.
718 Memory barriers are incorporated as needed to permit thread-safe access
719 to the queue element.
721 The list on which you want to enqueue the item.
725 The "offset" parameter is the offset (in bytes) of the link field
726 from the beginning of the data structure being queued (<code>__new</code>).
727 The link field should be a pointer type.
728 The <code>__offset</code> value needs to be same for all enqueuing and
729 dequeuing operations on the same queue, even if different structure types
730 are enqueued on that queue. The use of <code>offsetset()</code>, defined in
731 <code>stddef.h</code> is the common way to specify the <code>__offset</code>
734 void OSAtomicFifoEnqueue( OSFifoQueueHead
*__list
, void *__new
, size_t __offset
) __OSX_AVAILABLE_STARTING(__MAC_10_7
, __IPHONE_NA
);
736 /*! @abstract Dequeue an item from a list.
738 Memory barriers are incorporated as needed to permit thread-safe access
739 to the queue element.
741 The list on which you want to enqueue the item.
743 The "offset" parameter is the offset (in bytes) of the link field
744 from the beginning of the data structure being queued (<code>__new</code>).
745 The link field should be a pointer type.
746 The <code>__offset</code> value needs to be same for all enqueuing and
747 dequeuing operations on the same queue, even if different structure types
748 are enqueued on that queue. The use of <code>offsetset()</code>, defined in
749 <code>stddef.h</code> is the common way to specify the <code>__offset</code>
752 Returns the oldest enqueued element, or <code>NULL</code> if the
755 void* OSAtomicFifoDequeue( OSFifoQueueHead
*__list
, size_t __offset
) __OSX_AVAILABLE_STARTING(__MAC_10_7
, __IPHONE_NA
);
757 #endif /* __i386__ || __x86_64__ */
759 /*! @group Memory barriers */
761 /*! @abstract Memory barrier.
763 This function serves as both a read and write barrier.
765 void OSMemoryBarrier( void );
769 #endif /* _OSATOMIC_H_ */