/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OSATOMIC_H_
#define _OSATOMIC_H_

#include <stddef.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>

/* These are the preferred versions of the atomic and synchronization operations.
 * Their implementation is customized at boot time for the platform, including
 * late-breaking errata fixes as necessary. They are thread safe.
 *
 * WARNING: all addresses passed to these functions must be "naturally aligned", i.e.
 * int32_t's must be 32-bit aligned (low 2 bits of address zero), and int64_t's
 * must be 64-bit aligned (low 3 bits of address zero).
 */
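/* Illustration of the alignment requirement (a sketch; the names are hypothetical).
 * Ordinary variable declarations are naturally aligned by the compiler, but a
 * field of a packed structure may not be, and must not be passed to these functions.
 *
 *     int32_t counter;                          32-bit aligned: safe for OSAtomicAdd32
 *     int64_t total;                            64-bit aligned: safe for OSAtomicAdd64
 *
 *     #pragma pack(1)
 *     struct record { char tag; int64_t count; };
 *     #pragma pack()
 *     struct record r;                          &r.count may be misaligned: not safe
 */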
__BEGIN_DECLS

/* Arithmetic functions. They do not incorporate memory barriers and thus cannot
 * be used by themselves to synchronize shared memory. They return the new value.
 * The "or", "and", and "xor" operations are layered on top of compare-and-swap.
 */
int32_t OSAtomicAdd32( int32_t theAmount, int32_t *theValue );
inline static
int32_t OSAtomicIncrement32( int32_t *theValue ) { return OSAtomicAdd32( 1, theValue); }
inline static
int32_t OSAtomicDecrement32( int32_t *theValue ) { return OSAtomicAdd32( -1, theValue); }
int32_t OSAtomicOr32( uint32_t theMask, uint32_t *theValue );
int32_t OSAtomicAnd32( uint32_t theMask, uint32_t *theValue );
int32_t OSAtomicXor32( uint32_t theMask, uint32_t *theValue );
#if defined(__ppc64__) || defined(__i386__)
int64_t OSAtomicAdd64( int64_t theAmount, int64_t *theValue );
inline static
int64_t OSAtomicIncrement64( int64_t *theValue ) { return OSAtomicAdd64( 1, theValue); }
inline static
int64_t OSAtomicDecrement64( int64_t *theValue ) { return OSAtomicAdd64( -1, theValue); }
#endif /* defined(__ppc64__) || defined(__i386__) */

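/* Example usage (an illustrative sketch; the counter and function names are
 * hypothetical): maintaining a shared statistics counter without a lock.
 *
 *     #include <libkern/OSAtomic.h>
 *
 *     static int32_t packetsReceived = 0;       naturally aligned, file scope
 *
 *     void noteOnePacket( void )
 *     {
 *         (void) OSAtomicIncrement32( &packetsReceived );
 *     }
 *
 *     int32_t noteManyPackets( int32_t n )
 *     {
 *         return OSAtomicAdd32( n, &packetsReceived );   returns the value after the add
 *     }
 *
 * Because these operations carry no memory barrier, the counter cannot by itself
 * be used to publish other shared data to another thread.
 */
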
/* Compare and swap. They do not incorporate memory barriers and thus cannot be used
 * by themselves to synchronize shared memory. They return true if the swap occurred.
 */
bool OSAtomicCompareAndSwap32( int32_t oldValue, int32_t newValue, int32_t *theValue );
#if defined(__ppc64__) || defined(__i386__)
bool OSAtomicCompareAndSwap64( int64_t oldValue, int64_t newValue, int64_t *theValue );
#endif /* defined(__ppc64__) || defined(__i386__) */

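/* Example usage (an illustrative sketch; the names are hypothetical): the classic
 * compare-and-swap retry loop, here keeping a running maximum of observed values.
 *
 *     #include <stdbool.h>
 *     #include <libkern/OSAtomic.h>
 *
 *     static int32_t observedMax = 0;
 *
 *     void noteValue( int32_t value )
 *     {
 *         bool swapped;
 *         do {
 *             int32_t current = observedMax;
 *             if ( value <= current )
 *                 return;                       current maximum already large enough
 *             swapped = OSAtomicCompareAndSwap32( current, value, &observedMax );
 *         } while ( !swapped );                 retry if another thread changed it first
 *     }
 */
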
/* Test and set. They do not incorporate memory barriers and thus cannot be used by
 * themselves to synchronize shared memory. They return the original value of the bit.
 * They operate on bit (0x80>>(n&7)) in byte ((char*)theAddress + (n>>3)). They are
 * layered on top of the compare-and-swap operation.
 */
bool OSAtomicTestAndSet( uint32_t n, void *theAddress );
bool OSAtomicTestAndClear( uint32_t n, void *theAddress );

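/* Example usage (an illustrative sketch; the names are hypothetical): claiming
 * slots out of a small bitmap. Bit numbering follows the formula above, so bit 0
 * is the 0x80 bit of the first byte.
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *     #include <libkern/OSAtomic.h>
 *
 *     #define SLOT_COUNT 64
 *     static uint8_t slotMap[ SLOT_COUNT / 8 ];     one bit per slot, zero means free
 *
 *     int claimSlot( void )
 *     {
 *         uint32_t n;
 *         for ( n = 0; n < SLOT_COUNT; n++ ) {
 *             if ( !OSAtomicTestAndSet( n, slotMap ) )
 *                 return (int) n;                   bit was clear; we now own slot n
 *         }
 *         return -1;                                no free slot
 *     }
 *
 *     void releaseSlot( int n )
 *     {
 *         (void) OSAtomicTestAndClear( (uint32_t) n, slotMap );
 *     }
 */
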
/* FILO enqueue and dequeue. These use memory barriers as required to synchronize access to
 * the queued/dequeued structure. The "inOffset" field is the offset within the structure
 * of the link field. "inList" is the list head; it is not a struct. The queue is a singly
 * linked list with a zero terminator.
 */
void * OSAtomicDequeue( void ** inList, size_t inOffset);
void OSAtomicEnqueue( void ** inList, void * inNewLink, size_t inOffset);

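/* Example usage (an illustrative sketch; the structure and names are hypothetical):
 * a lock-free free list of buffers. "inOffset" is the offsetof() of the link field
 * within the queued structure, and the list head is a plain pointer initialized to zero.
 *
 *     #include <stddef.h>
 *     #include <libkern/OSAtomic.h>
 *
 *     struct buffer {
 *         struct buffer  *link;                 used by the queue operations
 *         char            data[ 512 ];
 *     };
 *
 *     static void *freeList = 0;                an empty queue is a zero head
 *
 *     void putBuffer( struct buffer *b )
 *     {
 *         OSAtomicEnqueue( &freeList, b, offsetof( struct buffer, link ) );
 *     }
 *
 *     struct buffer * getBuffer( void )
 *     {
 *         return OSAtomicDequeue( &freeList, offsetof( struct buffer, link ) );
 *     }
 *
 * Because the queue is FILO, getBuffer() returns the most recently queued buffer,
 * which suits a free list but not a first-in, first-out work queue.
 */
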
/* Spinlocks. These use memory barriers as required to synchronize access to shared
 * memory protected by the lock. The lock operation spins, but employs various strategies
 * to back off if the lock is held, making it immune to most priority-inversion livelocks.
 * The try operation immediately returns false if the lock was held, true if it took the
 * lock. The convention is that unlocked is zero, locked is nonzero.
 */
#define OS_SPINLOCK_INIT 0

typedef int32_t OSSpinLock;

bool OSSpinLockTry( OSSpinLock *lock );
void OSSpinLockLock( OSSpinLock *lock );
void OSSpinLockUnlock( OSSpinLock *lock );

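/* Example usage (an illustrative sketch; the fields are hypothetical): protecting
 * a pair of counters that must change together. The lock must be initialized to
 * OS_SPINLOCK_INIT before first use.
 *
 *     #include <libkern/OSAtomic.h>
 *
 *     static OSSpinLock statsLock = OS_SPINLOCK_INIT;
 *     static int64_t    byteCount = 0;
 *     static int32_t    callCount = 0;
 *
 *     void noteCall( int64_t bytes )
 *     {
 *         OSSpinLockLock( &statsLock );
 *         byteCount += bytes;
 *         callCount += 1;
 *         OSSpinLockUnlock( &statsLock );
 *     }
 *
 * OSSpinLockTry() can be used instead of OSSpinLockLock() when the caller has
 * useful work to do while the lock is busy; it never blocks.
 */
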
/* Memory barrier. This strictly orders memory accesses in a weakly ordered model such
 * as PPC. All loads and stores executed in sequential program order before the barrier
 * will complete with respect to the coherence mechanism, before any load or store
 * executed after the barrier. Used with an atomic operation, the barrier can be used to
 * create custom synchronization protocols, as an alternative to the spinlock or queue/
 * dequeue operations. Note that this barrier does not order uncached loads and stores.
 * On a uniprocessor, the barrier is typically a nop.
 */
void OSMemoryBarrier( void );

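/* Example usage (an illustrative sketch; the names are hypothetical): publishing a
 * value to another thread with a ready flag, using barriers in place of a lock.
 *
 *     #include <stdbool.h>
 *     #include <libkern/OSAtomic.h>
 *
 *     static int32_t sharedValue;
 *     static int32_t sharedValueReady = 0;
 *
 *     void publish( int32_t v )                 producer thread
 *     {
 *         sharedValue = v;
 *         OSMemoryBarrier();                    value completes before the flag is set
 *         sharedValueReady = 1;
 *     }
 *
 *     bool consume( int32_t *out )              consumer thread
 *     {
 *         if ( !sharedValueReady )
 *             return false;
 *         OSMemoryBarrier();                    flag is read before the value
 *         *out = sharedValue;
 *         return true;
 *     }
 */
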
__END_DECLS

#endif /* _OSATOMIC_H_ */