/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OSATOMIC_H_
#define _OSATOMIC_H_

#include <stddef.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>

/* These are the preferred versions of the atomic and synchronization operations.
 * Their implementation is customized at boot time for the platform, including
 * late-breaking errata fixes as necessary. They are thread safe.
 *
 * WARNING: all addresses passed to these functions must be "naturally aligned",
 * i.e., int32_t's must be 32-bit aligned (low 2 bits of address zero), and
 * int64_t's must be 64-bit aligned (low 3 bits of address zero).
 *
 * Note that some versions of the atomic functions incorporate memory barriers,
 * and some do not. Barriers strictly order memory access on a weakly-ordered
 * architecture such as PPC. All loads and stores executed in sequential program
 * order before the barrier will complete before any load or store executed after
 * the barrier. On a uniprocessor, the barrier operation is typically a nop.
 * On a multiprocessor, the barrier can be quite expensive.
 *
 * Most code will want to use the barrier functions to ensure that memory shared
 * between threads is properly synchronized. For example, if you want to initialize
 * a shared data structure and then atomically increment a variable to indicate
 * that the initialization is complete, then you MUST use OSAtomicIncrement32Barrier()
 * to ensure that the stores to your data structure complete before the atomic add.
 * Likewise, the consumer of that data structure MUST use OSAtomicDecrement32Barrier(),
 * in order to ensure that their loads of the structure are not executed before
 * the atomic decrement. On the other hand, if you are simply incrementing a global
 * counter, then it is safe and potentially faster to use OSAtomicIncrement32().
 * (A sketch of this publication pattern appears just below.)
 *
 * If you are unsure which version to use, prefer the barrier variants as they are
 * safer.
 *
 * The spinlock and queue operations always incorporate a barrier.
 */
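
/* Illustrative sketch (not part of this header): the publication pattern
 * described above. The producer initializes shared data and then signals
 * completion with the barrier variant, so its stores cannot appear to another
 * CPU after the flag changes. All names here are hypothetical.
 *
 *	static struct { int a, b; } gShared;
 *	static int32_t gReadyCount = 0;
 *
 *	void producer(void) {
 *		gShared.a = 1;		// initialize shared data...
 *		gShared.b = 2;
 *		OSAtomicIncrement32Barrier(&gReadyCount);	// ...then publish
 *	}
 */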
__BEGIN_DECLS

/* Arithmetic functions. They return the new value. All the "or", "and", and "xor"
 * operations, and the barrier forms of add, are layered on top of compare-and-swap.
 */
int32_t OSAtomicAdd32( int32_t theAmount, int32_t *theValue );
int32_t OSAtomicAdd32Barrier( int32_t theAmount, int32_t *theValue );

inline static
int32_t OSAtomicIncrement32( int32_t *theValue )
	{ return OSAtomicAdd32( 1, theValue); }
inline static
int32_t OSAtomicIncrement32Barrier( int32_t *theValue )
	{ return OSAtomicAdd32Barrier( 1, theValue); }

inline static
int32_t OSAtomicDecrement32( int32_t *theValue )
	{ return OSAtomicAdd32( -1, theValue); }
inline static
int32_t OSAtomicDecrement32Barrier( int32_t *theValue )
	{ return OSAtomicAdd32Barrier( -1, theValue); }

int32_t OSAtomicOr32( uint32_t theMask, uint32_t *theValue );
int32_t OSAtomicOr32Barrier( uint32_t theMask, uint32_t *theValue );

int32_t OSAtomicAnd32( uint32_t theMask, uint32_t *theValue );
int32_t OSAtomicAnd32Barrier( uint32_t theMask, uint32_t *theValue );

int32_t OSAtomicXor32( uint32_t theMask, uint32_t *theValue );
int32_t OSAtomicXor32Barrier( uint32_t theMask, uint32_t *theValue );

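/* Illustrative sketch (not part of this header): maintaining a set of flag
 * bits with the mask operations above. kFlagReady is a hypothetical flag.
 *
 *	static uint32_t gFlags = 0;
 *	enum { kFlagReady = 0x0001 };
 *
 *	OSAtomicOr32Barrier(kFlagReady, &gFlags);	// atomically set the bit
 *	OSAtomicAnd32Barrier(~kFlagReady, &gFlags);	// atomically clear the bit
 */
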
#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__)

int64_t OSAtomicAdd64( int64_t theAmount, int64_t *theValue );
int64_t OSAtomicAdd64Barrier( int64_t theAmount, int64_t *theValue );

inline static
int64_t OSAtomicIncrement64( int64_t *theValue )
	{ return OSAtomicAdd64( 1, theValue); }
inline static
int64_t OSAtomicIncrement64Barrier( int64_t *theValue )
	{ return OSAtomicAdd64Barrier( 1, theValue); }

inline static
int64_t OSAtomicDecrement64( int64_t *theValue )
	{ return OSAtomicAdd64( -1, theValue); }
inline static
int64_t OSAtomicDecrement64Barrier( int64_t *theValue )
	{ return OSAtomicAdd64Barrier( -1, theValue); }

#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) */

/* Compare and swap. They return true if the swap occurred.
 */
bool OSAtomicCompareAndSwap32( int32_t oldValue, int32_t newValue, int32_t *theValue );
bool OSAtomicCompareAndSwap32Barrier( int32_t oldValue, int32_t newValue, int32_t *theValue );

#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__)

bool OSAtomicCompareAndSwap64( int64_t oldValue, int64_t newValue, int64_t *theValue );
bool OSAtomicCompareAndSwap64Barrier( int64_t oldValue, int64_t newValue, int64_t *theValue );

#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) */

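/* Illustrative sketch (not part of this header): the usual compare-and-swap
 * retry loop, here used to build a hypothetical atomic-maximum helper. The
 * loop re-reads the current value and retries whenever another thread changed
 * it between the read and the swap.
 *
 *	static int32_t atomic_max32(int32_t newValue, int32_t *theValue) {
 *		int32_t oldValue;
 *		do {
 *			oldValue = *theValue;
 *			if (newValue <= oldValue)
 *				return oldValue;	// already at least newValue
 *		} while (!OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue));
 *		return newValue;
 *	}
 */
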
/* Test and set. They return the original value of the bit, and operate on bit (0x80>>(n&7))
 * in byte ((char*)theAddress + (n>>3)). They are layered on top of the compare-and-swap
 * operation.
 */
bool OSAtomicTestAndSet( uint32_t n, void *theAddress );
bool OSAtomicTestAndSetBarrier( uint32_t n, void *theAddress );
bool OSAtomicTestAndClear( uint32_t n, void *theAddress );
bool OSAtomicTestAndClearBarrier( uint32_t n, void *theAddress );

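/* Illustrative sketch (not part of this header): the bit numbering used by
 * the test-and-set operations. Bit n lives in byte (n>>3), at mask
 * (0x80>>(n&7)), i.e., bits count from the most significant bit of the first
 * byte. gBitmap is hypothetical.
 *
 *	static uint8_t gBitmap[8];	// 64 bits
 *
 *	bool wasSet = OSAtomicTestAndSet(0, gBitmap);	// bit 0x80 of gBitmap[0]
 *	OSAtomicTestAndClear(9, gBitmap);		// bit 0x40 of gBitmap[1]
 */
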
/* Spinlocks. These use memory barriers as required to synchronize access to shared
 * memory protected by the lock. The lock operation spins, but employs various strategies
 * to back off if the lock is held, making it immune to most priority-inversion livelocks.
 * The try operation immediately returns false if the lock was held, true if it took the
 * lock. The convention is that unlocked is zero, locked is nonzero.
 */
#define OS_SPINLOCK_INIT    0

typedef int32_t OSSpinLock;

bool OSSpinLockTry( OSSpinLock *lock );
void OSSpinLockLock( OSSpinLock *lock );
void OSSpinLockUnlock( OSSpinLock *lock );

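/* Illustrative sketch (not part of this header): guarding a short critical
 * section with a spinlock. gListLock, gListHead, and struct node are
 * hypothetical.
 *
 *	static OSSpinLock gListLock = OS_SPINLOCK_INIT;
 *	static struct node { struct node *next; } *gListHead;
 *
 *	void push(struct node *n) {
 *		OSSpinLockLock(&gListLock);	// spins (with backoff) until acquired
 *		n->next = gListHead;
 *		gListHead = n;
 *		OSSpinLockUnlock(&gListLock);	// includes the necessary barrier
 *	}
 */
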
/* Memory barrier. It is both a read and write barrier.
 */
void OSMemoryBarrier( void );

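/* Illustrative sketch (not part of this header): using OSMemoryBarrier() to
 * order plain stores when no atomic operation is involved. The variables are
 * hypothetical, and the reader needs a matching barrier before its loads.
 *
 *	gData = 42;		// prepare shared data
 *	OSMemoryBarrier();	// stores above complete before the store below
 *	gDataValid = 1;		// signal readiness
 */
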
__END_DECLS

#endif /* _OSATOMIC_H_ */