/*
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OSATOMIC_H_
#define _OSATOMIC_H_

#include <stddef.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>

/* These are the preferred versions of the atomic and synchronization operations.
 * Their implementation is customized at boot time for the platform, including
 * late-breaking errata fixes as necessary.  They are thread safe.
 *
 * WARNING: all addresses passed to these functions must be "naturally aligned",
 * i.e. int32_t's must be 32-bit aligned (low 2 bits of address zero), and
 * int64_t's must be 64-bit aligned (low 3 bits of address zero).
 *
 * Note that some versions of the atomic functions incorporate memory barriers
 * and some do not.  Barriers strictly order memory access on a weakly-ordered
 * architecture such as PPC: all loads and stores executed in sequential program
 * order before the barrier will complete before any load or store executed after
 * the barrier.  On a uniprocessor, the barrier operation is typically a no-op.
 * On a multiprocessor, the barrier can be quite expensive on some platforms,
 * e.g. PPC.
 *
 * Most code will want to use the barrier functions to ensure that memory shared
 * between threads is properly synchronized.  For example, if you want to
 * initialize a shared data structure and then atomically increment a variable to
 * indicate that the initialization is complete, you must use
 * OSAtomicIncrement32Barrier() to ensure that the stores to your data structure
 * complete before the atomic increment.  Likewise, the consumer of that data
 * structure must use OSAtomicDecrement32Barrier() to ensure that its loads of the
 * structure are not executed before the atomic decrement.  (See the sketch
 * following this comment.)  On the other hand, if you are simply incrementing a
 * global counter, it is safe and potentially faster to use OSAtomicIncrement32().
 *
 * If you are unsure which version to use, prefer the barrier variants; they are
 * safer.
 *
 * The spinlock and queue operations always incorporate a barrier.
 */
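
/* Illustrative sketch (not part of this API): publishing a shared structure with
 * the barrier variants, as described above.  The names MyConfig, gConfig and
 * gInitCount are hypothetical.
 *
 *	static struct MyConfig { int value; } gConfig;
 *	static volatile int32_t gInitCount = 0;
 *
 *	static void publish(void)
 *	{
 *		gConfig.value = 42;				// ordinary stores to the shared structure
 *		OSAtomicIncrement32Barrier(&gInitCount);	// barrier: the stores above complete first
 *	}
 *
 *	static int consume(void)
 *	{
 *		if (OSAtomicDecrement32Barrier(&gInitCount) < 0)
 *			return -1;				// not yet published (sketch: no retry or cleanup)
 *		return gConfig.value;				// these loads are not executed before the barrier
 *	}
 */
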
__BEGIN_DECLS

/* Arithmetic functions.  They return the new value.
 */
int32_t OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue );
int32_t OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue );

__inline static
int32_t OSAtomicIncrement32( volatile int32_t *__theValue )
		{ return OSAtomicAdd32(  1, __theValue); }
__inline static
int32_t OSAtomicIncrement32Barrier( volatile int32_t *__theValue )
		{ return OSAtomicAdd32Barrier(  1, __theValue); }

__inline static
int32_t OSAtomicDecrement32( volatile int32_t *__theValue )
		{ return OSAtomicAdd32( -1, __theValue); }
__inline static
int32_t OSAtomicDecrement32Barrier( volatile int32_t *__theValue )
		{ return OSAtomicAdd32Barrier( -1, __theValue); }

#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)

int64_t OSAtomicAdd64( int64_t __theAmount, volatile int64_t *__theValue );
int64_t OSAtomicAdd64Barrier( int64_t __theAmount, volatile int64_t *__theValue );

__inline static
int64_t OSAtomicIncrement64( volatile int64_t *__theValue )
		{ return OSAtomicAdd64(  1, __theValue); }
__inline static
int64_t OSAtomicIncrement64Barrier( volatile int64_t *__theValue )
		{ return OSAtomicAdd64Barrier(  1, __theValue); }

__inline static
int64_t OSAtomicDecrement64( volatile int64_t *__theValue )
		{ return OSAtomicAdd64( -1, __theValue); }
__inline static
int64_t OSAtomicDecrement64Barrier( volatile int64_t *__theValue )
		{ return OSAtomicAdd64Barrier( -1, __theValue); }

#endif  /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */

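/* Illustrative sketch (not part of this API): a simple statistics counter and a
 * barrier-protected reference count.  The names gRequestCount, MyObject and
 * my_object_release are hypothetical.
 *
 *	static volatile int32_t gRequestCount = 0;
 *
 *	// A plain global counter needs no barrier; the non-barrier form may be faster.
 *	static void note_request(void)
 *		{ (void) OSAtomicIncrement32(&gRequestCount); }
 *
 *	struct MyObject { volatile int32_t refcount; };	// plus payload fields
 *
 *	// Dropping a reference uses the barrier form so that earlier writes to the
 *	// object are visible before another thread can observe the count reach zero.
 *	static void my_object_release(struct MyObject *obj)
 *	{
 *		if (OSAtomicDecrement32Barrier(&obj->refcount) == 0)
 *			free(obj);			// requires <stdlib.h>
 *	}
 */
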
/* Boolean functions (and, or, xor).  These come in four versions for each
 * operation: with and without barriers, and returning the old or the new value.
 * The "Orig" versions return the original value, i.e. the value before the
 * operation; the non-Orig versions return the value after the operation.  All are
 * layered on top of compare-and-swap.  (A usage sketch follows the declarations.)
 */
int32_t OSAtomicOr32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicOr32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicOr32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicOr32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

int32_t OSAtomicAnd32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicAnd32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicAnd32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicAnd32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

int32_t OSAtomicXor32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicXor32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicXor32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicXor32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

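/* Illustrative sketch (not part of this API): using the "Orig" form to set a flag
 * bit and learn whether it was already set.  The flag names are hypothetical.
 *
 *	enum { kFlagInitialized = 0x00000001 };
 *	static volatile uint32_t gFlags = 0;
 *
 *	static bool set_initialized_flag(void)
 *	{
 *		// OSAtomicOr32OrigBarrier returns the value the word held before the OR,
 *		// so the bit test below tells us whether another thread got here first.
 *		uint32_t old = (uint32_t) OSAtomicOr32OrigBarrier(kFlagInitialized, &gFlags);
 *		return (old & kFlagInitialized) == 0;	// true if we were the first to set it
 *	}
 */
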
/* Compare and swap.  They return true if the swap occurred.  There are several
 * versions, depending on data type and on whether or not a barrier is used.
 * (A usage sketch follows the declarations.)
 */
bool OSAtomicCompareAndSwap32( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
bool OSAtomicCompareAndSwap32Barrier( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
bool OSAtomicCompareAndSwapPtr( void *__oldValue, void *__newValue, void * volatile *__theValue );
bool OSAtomicCompareAndSwapPtrBarrier( void *__oldValue, void *__newValue, void * volatile *__theValue );
bool OSAtomicCompareAndSwapInt( int __oldValue, int __newValue, volatile int *__theValue );
bool OSAtomicCompareAndSwapIntBarrier( int __oldValue, int __newValue, volatile int *__theValue );
bool OSAtomicCompareAndSwapLong( long __oldValue, long __newValue, volatile long *__theValue );
bool OSAtomicCompareAndSwapLongBarrier( long __oldValue, long __newValue, volatile long *__theValue );

#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)

bool OSAtomicCompareAndSwap64( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );
bool OSAtomicCompareAndSwap64Barrier( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );

#endif  /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */

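/* Illustrative sketch (not part of this API): the usual compare-and-swap retry
 * loop, here recording a running maximum.  The names gMaxSeen and note_value are
 * hypothetical.
 *
 *	static volatile int32_t gMaxSeen = 0;
 *
 *	static void note_value(int32_t v)
 *	{
 *		int32_t old;
 *		do {
 *			old = gMaxSeen;			// snapshot the current value
 *			if (v <= old)
 *				return;			// nothing to update
 *		} while (!OSAtomicCompareAndSwap32Barrier(old, v, &gMaxSeen));
 *		// The swap succeeds only if gMaxSeen still equals the snapshot; otherwise retry.
 *	}
 */
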
/* Test and set.  They return the original value of the bit, and operate on bit
 * (0x80 >> (__n & 7)) of the byte at ((char *)__theAddress + (__n >> 3)).  That
 * is, bits are numbered starting from the most significant bit of the first byte.
 * (A usage sketch follows the declarations.)
 */
bool OSAtomicTestAndSet( uint32_t __n, volatile void *__theAddress );
bool OSAtomicTestAndSetBarrier( uint32_t __n, volatile void *__theAddress );
bool OSAtomicTestAndClear( uint32_t __n, volatile void *__theAddress );
bool OSAtomicTestAndClearBarrier( uint32_t __n, volatile void *__theAddress );

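/* Illustrative sketch (not part of this API): how the bit numbering works.  The
 * array name gBitmap is hypothetical.
 *
 *	static volatile uint8_t gBitmap[8] = { 0 };	// 64 bits, all initially clear
 *
 *	// Bit 0 is the 0x80 bit of gBitmap[0]; bit 9 is the 0x40 bit of gBitmap[1].
 *	static bool claim_slot_9(void)
 *	{
 *		// Returns the bit's previous value, so "false" means we claimed it.
 *		return OSAtomicTestAndSetBarrier(9, gBitmap) == false;
 *	}
 */
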
/* Spinlocks.  These use memory barriers as required to synchronize access to
 * shared memory protected by the lock.  The lock operation spins, but employs
 * various strategies to back off if the lock is held, making it immune to most
 * priority-inversion livelocks.  The try operation immediately returns false if
 * the lock was held, and true if it took the lock.  The convention is that
 * unlocked is zero and locked is nonzero.  (A usage sketch follows the
 * declarations.)
 */
#define	OS_SPINLOCK_INIT    0

typedef int32_t OSSpinLock;

bool OSSpinLockTry( volatile OSSpinLock *__lock );
void OSSpinLockLock( volatile OSSpinLock *__lock );
void OSSpinLockUnlock( volatile OSSpinLock *__lock );

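/* Illustrative sketch (not part of this API): protecting a small critical section.
 * The names gStatsLock, gBytesSent and note_bytes_sent are hypothetical.
 *
 *	static OSSpinLock gStatsLock = OS_SPINLOCK_INIT;
 *	static uint64_t gBytesSent = 0;
 *
 *	static void note_bytes_sent(uint64_t n)
 *	{
 *		OSSpinLockLock(&gStatsLock);	// spins (with backoff) until the lock is free
 *		gBytesSent += n;		// keep the critical section short; no blocking calls
 *		OSSpinLockUnlock(&gStatsLock);
 *	}
 */
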
/* Lockless atomic enqueue and dequeue.  These routines manipulate singly-linked
 * LIFO lists; i.e., a dequeue returns the most recently enqueued element, or NULL
 * if the list is empty.  The "offset" parameter is the offset in bytes of the
 * link field within the data structure being queued.  The link field should be a
 * pointer type.  Memory barriers are incorporated as needed to permit thread-safe
 * access to the queue element.  (A usage sketch follows the declarations.)
 */
#if defined(__x86_64__)

typedef volatile struct {
	void	*opaque1;
	long	 opaque2;
} OSQueueHead __attribute__ ((aligned (16)));

#else

typedef volatile struct {
	void	*opaque1;
	long	 opaque2;
} OSQueueHead;

#endif

#define	OS_ATOMIC_QUEUE_INIT	{ NULL, 0 }

void  OSAtomicEnqueue( OSQueueHead *__list, void *__new, size_t __offset);
void* OSAtomicDequeue( OSQueueHead *__list, size_t __offset);

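/* Illustrative sketch (not part of this API): a LIFO free list of nodes.  The
 * names Node and gFreeList are hypothetical.
 *
 *	struct Node {
 *		struct Node	*link;		// the link field used by the queue routines
 *		int		 payload;
 *	};
 *
 *	static OSQueueHead gFreeList = OS_ATOMIC_QUEUE_INIT;
 *
 *	// offsetof comes from <stddef.h>, which this header already includes.
 *	static void put_node(struct Node *n)
 *		{ OSAtomicEnqueue(&gFreeList, n, offsetof(struct Node, link)); }
 *
 *	static struct Node *get_node(void)
 *		{ return (struct Node *) OSAtomicDequeue(&gFreeList, offsetof(struct Node, link)); }
 */
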
/* Memory barrier.  It is both a read and a write barrier.
 */
void OSMemoryBarrier( void );

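/* Illustrative sketch (not part of this API): pairing explicit barriers with plain
 * stores and loads.  The names gData and gDataReady are hypothetical.
 *
 *	static int gData;
 *	static volatile int gDataReady = 0;
 *
 *	// Producer: make gData globally visible before the ready flag.
 *	static void produce(int value)
 *	{
 *		gData = value;
 *		OSMemoryBarrier();
 *		gDataReady = 1;
 *	}
 *
 *	// Consumer: read the flag before reading gData.
 *	static int consume_when_ready(void)
 *	{
 *		while (!gDataReady)
 *			;			// sketch: a real consumer would not busy-wait
 *		OSMemoryBarrier();
 *		return gData;
 *	}
 */
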
__END_DECLS

#endif	/* _OSATOMIC_H_ */