/*
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OSATOMIC_H_
#define _OSATOMIC_H_

#include <stddef.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>

/* These are the preferred versions of the atomic and synchronization operations.
 * Their implementation is customized at boot time for the platform, including
 * late-breaking errata fixes as necessary.  They are thread safe.
 *
 * WARNING: all addresses passed to these functions must be "naturally aligned",
 * i.e. int32_t's must be 32-bit aligned (low 2 bits of address zero), and int64_t's
 * must be 64-bit aligned (low 3 bits of address zero.)
 *
 * Note that some versions of the atomic functions incorporate memory barriers
 * and some do not.  Barriers strictly order memory access on a weakly-ordered
 * architecture such as PPC.  All loads and stores executed in sequential program
 * order before the barrier will complete before any load or store executed after
 * the barrier.  On a uniprocessor, the barrier operation is typically a nop.
 * On a multiprocessor, the barrier can be quite expensive on some platforms,
 * e.g. PPC.
 *
 * Most code will want to use the barrier functions to ensure that memory shared
 * between threads is properly synchronized.  For example, if you want to initialize
 * a shared data structure and then atomically increment a variable to indicate
 * that the initialization is complete, you must use OSAtomicIncrement32Barrier()
 * to ensure that the stores to your data structure complete before the atomic add.
 * Likewise, the consumer of that data structure must use OSAtomicDecrement32Barrier()
 * to ensure that its loads of the structure are not executed before the atomic
 * decrement.  On the other hand, if you are simply incrementing a global counter,
 * it is safe and potentially faster to use OSAtomicIncrement32().
 *
 * If you are unsure which version to use, prefer the barrier variants as they are
 * safer.
 *
 * The spinlock and queue operations always incorporate a barrier.
 */
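
/* A minimal sketch of the producer/consumer pattern described above.  The names below
 * (SharedConfig, gConfig, gReady, consume) are hypothetical and are not part of this
 * header; they only illustrate where the barrier variants matter.
 *
 *	typedef struct { int value; } SharedConfig;
 *	static SharedConfig      gConfig;
 *	static volatile int32_t  gReady = 0;
 *
 *	// Producer: fill in the structure, then publish it.  The barrier ensures the
 *	// stores to gConfig complete before the count becomes visible as nonzero.
 *	void produce( void ) {
 *		gConfig.value = 42;
 *		OSAtomicIncrement32Barrier( &gReady );
 *	}
 *
 *	// Consumer: claim an item, then read the structure.  The barrier ensures the
 *	// loads from gConfig are not executed before the atomic decrement.
 *	void consume_one( void ) {
 *		if ( OSAtomicDecrement32Barrier( &gReady ) >= 0 )
 *			consume( gConfig.value );
 *	}
 */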
__BEGIN_DECLS

/* Arithmetic functions.  They return the new value.
 */
int32_t	OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue );
int32_t	OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue );

__inline static
int32_t	OSAtomicIncrement32( volatile int32_t *__theValue )
		{ return OSAtomicAdd32(  1, __theValue); }
__inline static
int32_t	OSAtomicIncrement32Barrier( volatile int32_t *__theValue )
		{ return OSAtomicAdd32Barrier(  1, __theValue); }

__inline static
int32_t	OSAtomicDecrement32( volatile int32_t *__theValue )
		{ return OSAtomicAdd32( -1, __theValue); }
__inline static
int32_t	OSAtomicDecrement32Barrier( volatile int32_t *__theValue )
		{ return OSAtomicAdd32Barrier( -1, __theValue); }

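/* For a simple statistics counter that no other data depends on, the non-barrier
 * variants are sufficient, as noted above.  A minimal sketch (gOpsCompleted and
 * record_op are hypothetical names, not part of this header):
 *
 *	static volatile int32_t gOpsCompleted = 0;
 *
 *	// Returns the new value of the counter after the increment.
 *	int32_t record_op( void ) {
 *		return OSAtomicIncrement32( &gOpsCompleted );
 *	}
 */
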
#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)

int64_t	OSAtomicAdd64( int64_t __theAmount, volatile int64_t *__theValue );
int64_t	OSAtomicAdd64Barrier( int64_t __theAmount, volatile int64_t *__theValue );

__inline static
int64_t	OSAtomicIncrement64( volatile int64_t *__theValue )
		{ return OSAtomicAdd64(  1, __theValue); }
__inline static
int64_t	OSAtomicIncrement64Barrier( volatile int64_t *__theValue )
		{ return OSAtomicAdd64Barrier(  1, __theValue); }

__inline static
int64_t	OSAtomicDecrement64( volatile int64_t *__theValue )
		{ return OSAtomicAdd64( -1, __theValue); }
__inline static
int64_t	OSAtomicDecrement64Barrier( volatile int64_t *__theValue )
		{ return OSAtomicAdd64Barrier( -1, __theValue); }

#endif  /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */

/* Boolean functions (and, or, xor.)  These come in four versions for each operation:
 * with and without barriers, and returning the old or new value of the operation.
 * The "Orig" versions return the original value, i.e. before the operation; the
 * non-Orig versions return the value after the operation.  All are layered on top of
 * compare-and-swap.
 */
int32_t	OSAtomicOr32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicOr32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicOr32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicOr32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

int32_t	OSAtomicAnd32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicAnd32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicAnd32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicAnd32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

int32_t	OSAtomicXor32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicXor32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicXor32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicXor32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

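/* The "Orig" variants are useful when the caller needs to know the previous state of
 * a flag word.  A minimal sketch (kStartedFlag, gFlags, and claim_start are hypothetical
 * names, not part of this header):
 *
 *	#define kStartedFlag  0x00000001
 *	static volatile uint32_t gFlags = 0;
 *
 *	// Returns true only for the one caller that sets the bit first; the Orig
 *	// variant reports the value of gFlags before the OR was applied.
 *	bool claim_start( void ) {
 *		return ( OSAtomicOr32OrigBarrier( kStartedFlag, &gFlags ) & kStartedFlag ) == 0;
 *	}
 */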

/* Compare and swap.  They return true if the swap occurred.  There are several versions,
 * depending on data type and whether or not a barrier is used.
 */
bool	OSAtomicCompareAndSwap32( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
bool	OSAtomicCompareAndSwap32Barrier( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
bool	OSAtomicCompareAndSwapPtr( void *__oldValue, void *__newValue, void * volatile *__theValue );
bool	OSAtomicCompareAndSwapPtrBarrier( void *__oldValue, void *__newValue, void * volatile *__theValue );
bool	OSAtomicCompareAndSwapInt( int __oldValue, int __newValue, volatile int *__theValue );
bool	OSAtomicCompareAndSwapIntBarrier( int __oldValue, int __newValue, volatile int *__theValue );
bool	OSAtomicCompareAndSwapLong( long __oldValue, long __newValue, volatile long *__theValue );
bool	OSAtomicCompareAndSwapLongBarrier( long __oldValue, long __newValue, volatile long *__theValue );
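
/* Compare-and-swap is typically used in a retry loop: read the current value, compute
 * the desired new value, and try to install it, repeating if another thread won the
 * race.  A minimal sketch (gMax and update_max are hypothetical names, not part of
 * this header):
 *
 *	static volatile int32_t gMax = 0;
 *
 *	// Atomically raise gMax so it is at least `candidate`.
 *	void update_max( int32_t candidate ) {
 *		int32_t old;
 *		do {
 *			old = gMax;
 *			if ( candidate <= old )
 *				return;
 *		} while ( !OSAtomicCompareAndSwap32Barrier( old, candidate, &gMax ) );
 *	}
 */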

#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)

bool	OSAtomicCompareAndSwap64( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );
bool	OSAtomicCompareAndSwap64Barrier( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );

#endif  /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */


/* Test and set.  They return the original value of the bit, and operate on bit (0x80>>(n&7))
 * in byte ((char*)theAddress + (n>>3)).
 */
bool	OSAtomicTestAndSet( uint32_t __n, volatile void *__theAddress );
bool	OSAtomicTestAndSetBarrier( uint32_t __n, volatile void *__theAddress );
bool	OSAtomicTestAndClear( uint32_t __n, volatile void *__theAddress );
bool	OSAtomicTestAndClearBarrier( uint32_t __n, volatile void *__theAddress );
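
/* The bit numbering runs from the most significant bit of each byte.  A minimal sketch
 * (the `bits` buffer is hypothetical, not part of this header):
 *
 *	static volatile uint8_t bits[4] = { 0 };
 *
 *	// n == 10: byte index is 10>>3 == 1, bit mask is 0x80>>(10&7) == 0x20,
 *	// so this sets bit 0x20 of bits[1] and returns its previous value.
 *	bool was_set = OSAtomicTestAndSet( 10, bits );
 */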


/* Spinlocks.  These use memory barriers as required to synchronize access to shared
 * memory protected by the lock.  The lock operation spins, but employs various strategies
 * to back off if the lock is held, making it immune to most priority-inversion livelocks.
 * The try operation immediately returns false if the lock was held, true if it took the
 * lock.  The convention is that unlocked is zero, locked is nonzero.
 */
#define	OS_SPINLOCK_INIT    0

typedef int32_t OSSpinLock;

bool	OSSpinLockTry( volatile OSSpinLock *__lock );
void	OSSpinLockLock( volatile OSSpinLock *__lock );
void	OSSpinLockUnlock( volatile OSSpinLock *__lock );
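
/* A minimal sketch of guarding a small critical section (gCountLock, gCount, and
 * add_one are hypothetical names, not part of this header):
 *
 *	static OSSpinLock gCountLock = OS_SPINLOCK_INIT;
 *	static int        gCount = 0;
 *
 *	void add_one( void ) {
 *		OSSpinLockLock( &gCountLock );
 *		gCount++;                        // protected by the lock
 *		OSSpinLockUnlock( &gCountLock );
 *	}
 *
 * Spinlocks are best suited to very short critical sections; for longer ones a
 * blocking lock such as pthread_mutex_t is usually a better choice.
 */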


/* Lockless atomic enqueue and dequeue.  These routines manipulate singly
 * linked LIFO lists.  I.e., a dequeue will return the most recently enqueued
 * element, or NULL if the list is empty.  The "offset" parameter is the offset
 * in bytes of the link field within the data structure being queued.  The
 * link field should be a pointer type.  Memory barriers are incorporated as
 * needed to permit thread-safe access to the queue element.
 */
#if defined(__x86_64__)

typedef volatile struct {
	void	*opaque1;
	long	 opaque2;
} OSQueueHead __attribute__ ((aligned (16)));

#else

typedef volatile struct {
	void	*opaque1;
	long	 opaque2;
} OSQueueHead;

#endif

#define	OS_ATOMIC_QUEUE_INIT	{ NULL, 0 }

void  OSAtomicEnqueue( OSQueueHead *__list, void *__new, size_t __offset);
void* OSAtomicDequeue( OSQueueHead *__list, size_t __offset);
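
/* The offset parameter is normally computed with offsetof().  A minimal sketch
 * (Node, gFreeList, and the `link` field name are hypothetical, not part of this header):
 *
 *	typedef struct Node {
 *		struct Node *link;       // link field used by the queue routines
 *		int          payload;
 *	} Node;
 *
 *	static OSQueueHead gFreeList = OS_ATOMIC_QUEUE_INIT;
 *
 *	void   push_node( Node *n ) { OSAtomicEnqueue( &gFreeList, n, offsetof(Node, link) ); }
 *	Node  *pop_node( void )     { return (Node *) OSAtomicDequeue( &gFreeList, offsetof(Node, link) ); }
 *
 * Because the list is LIFO, pop_node() returns the most recently pushed node, or NULL
 * when the list is empty.
 */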


/* Memory barrier.  It is both a read and write barrier.
 */
void	OSMemoryBarrier( void );
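
/* OSMemoryBarrier() can be used on its own when ordering is needed between plain
 * stores and loads that are not paired with one of the atomic operations above.
 * A minimal sketch (gData, gDataReady, and compute are hypothetical names, not part
 * of this header):
 *
 *	gData = compute();      // ordinary store
 *	OSMemoryBarrier();      // ensure the store to gData completes first
 *	gDataReady = 1;
 */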


__END_DECLS

#endif /* _OSATOMIC_H_ */