/*
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef _OSATOMIC_H_
#define _OSATOMIC_H_

#include <stddef.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>

__BEGIN_DECLS
/* These are the preferred versions of the atomic and synchronization operations.
 * Their implementation is customized at boot time for the platform, including
 * late-breaking errata fixes as necessary.  They are thread safe.
 *
 * WARNING: all addresses passed to these functions must be "naturally aligned", ie
 * int32_t's must be 32-bit aligned (low 2 bits of address zero), and int64_t's
 * must be 64-bit aligned (low 3 bits of address zero.)
 *
 * Note that some versions of the atomic functions incorporate memory barriers,
 * and some do not.  Barriers strictly order memory access on a weakly-ordered
 * architecture such as PPC.  All loads and stores executed in sequential program
 * order before the barrier will complete before any load or store executed after
 * the barrier.  On a uniprocessor, the barrier operation is typically a nop.
 * On a multiprocessor, the barrier can be quite expensive on some platforms,
 * eg PPC.
 *
 * Most code will want to use the barrier functions to ensure that memory shared
 * between threads is properly synchronized.  For example, if you want to initialize
 * a shared data structure and then atomically increment a variable to indicate
 * that the initialization is complete, then you must use OSAtomicIncrement32Barrier()
 * to ensure that the stores to your data structure complete before the atomic add.
 * Likewise, the consumer of that data structure must use OSAtomicDecrement32Barrier(),
 * in order to ensure that their loads of the structure are not executed before
 * the atomic decrement.  On the other hand, if you are simply incrementing a global
 * counter, then it is safe and potentially faster to use OSAtomicIncrement32().
 *
 * If you are unsure which version to use, prefer the barrier variants as they are
 * safer.
 *
 * The spinlock and queue operations always incorporate a barrier.
 */

/* Arithmetic functions.  They return the new value.
 */
int32_t	OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue );
int32_t	OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue );

__inline static
int32_t	OSAtomicIncrement32( volatile int32_t *__theValue )
		{ return OSAtomicAdd32( 1, __theValue ); }

__inline static
int32_t	OSAtomicIncrement32Barrier( volatile int32_t *__theValue )
		{ return OSAtomicAdd32Barrier( 1, __theValue ); }

__inline static
int32_t	OSAtomicDecrement32( volatile int32_t *__theValue )
		{ return OSAtomicAdd32( -1, __theValue ); }

__inline static
int32_t	OSAtomicDecrement32Barrier( volatile int32_t *__theValue )
		{ return OSAtomicAdd32Barrier( -1, __theValue ); }
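
/* Usage sketch (not part of the original API; all names below are hypothetical):
 * the producer/consumer handshake described in the comment at the top of this
 * file.  The producer's ordinary stores must complete before the counter update
 * becomes visible, so the barrier variants are required on both sides.
 */
__inline static void
ExamplePublish( int *sharedConfig, volatile int32_t *initCount )
{
	sharedConfig[0] = 42;	/* ordinary stores to the shared structure */
	sharedConfig[1] = 7;
	/* The barrier ensures the stores above complete before the
	 * incremented count is visible to consumers. */
	OSAtomicIncrement32Barrier( initCount );
}

__inline static int
ExampleConsume( const int *sharedConfig, volatile int32_t *initCount )
{
	/* The barrier keeps the load below from being executed before the
	 * atomic decrement, matching the producer's barrier above. */
	OSAtomicDecrement32Barrier( initCount );
	return sharedConfig[0];
}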

#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)

int64_t	OSAtomicAdd64( int64_t __theAmount, volatile int64_t *__theValue );
int64_t	OSAtomicAdd64Barrier( int64_t __theAmount, volatile int64_t *__theValue );

__inline static
int64_t	OSAtomicIncrement64( volatile int64_t *__theValue )
		{ return OSAtomicAdd64( 1, __theValue ); }

__inline static
int64_t	OSAtomicIncrement64Barrier( volatile int64_t *__theValue )
		{ return OSAtomicAdd64Barrier( 1, __theValue ); }

__inline static
int64_t	OSAtomicDecrement64( volatile int64_t *__theValue )
		{ return OSAtomicAdd64( -1, __theValue ); }

__inline static
int64_t	OSAtomicDecrement64Barrier( volatile int64_t *__theValue )
		{ return OSAtomicAdd64Barrier( -1, __theValue ); }

#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
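
/* Usage sketch (hypothetical names): a simple statistics counter needs
 * atomicity but no particular ordering, so the non-barrier variant is
 * sufficient and potentially faster, as noted in the comment at the top.
 */
#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)
__inline static int64_t
ExampleCountBytes( volatile int64_t *totalBytes, int64_t nbytes )
{
	return OSAtomicAdd64( nbytes, totalBytes );	/* returns the new total */
}
#endif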

/* Boolean functions (and, or, xor.)  These come in four versions for each operation:
 * with and without barriers, and returning the old or new value of the operation.
 * The "Orig" versions return the original value, ie before the operation; the non-Orig
 * versions return the value after the operation.  All are layered on top of
 * compare-and-swap.
 */
int32_t	OSAtomicOr32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicOr32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicOr32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicOr32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

int32_t	OSAtomicAnd32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicAnd32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicAnd32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicAnd32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

int32_t	OSAtomicXor32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicXor32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicXor32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t	OSAtomicXor32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );
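
/* Usage sketch (hypothetical names): an "Orig" variant reports the previous
 * state, here used to set a flag bit exactly once across threads.
 */
__inline static bool
ExampleSetShutdownFlag( volatile uint32_t *flags )
{
	/* 0x1 is a hypothetical flag bit.  OSAtomicOr32OrigBarrier returns the
	 * value before the OR, so the bit was already set iff it appears in
	 * the returned value. */
	return (OSAtomicOr32OrigBarrier( 0x1, flags ) & 0x1) != 0;
}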

/* Compare and swap.  They return true if the swap occurred.  There are several versions,
 * depending on data type and whether or not a barrier is used.
 */
bool	OSAtomicCompareAndSwap32( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
bool	OSAtomicCompareAndSwap32Barrier( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
bool	OSAtomicCompareAndSwapPtr( void *__oldValue, void *__newValue, void * volatile *__theValue );
bool	OSAtomicCompareAndSwapPtrBarrier( void *__oldValue, void *__newValue, void * volatile *__theValue );
bool	OSAtomicCompareAndSwapInt( int __oldValue, int __newValue, volatile int *__theValue );
bool	OSAtomicCompareAndSwapIntBarrier( int __oldValue, int __newValue, volatile int *__theValue );
bool	OSAtomicCompareAndSwapLong( long __oldValue, long __newValue, volatile long *__theValue );
bool	OSAtomicCompareAndSwapLongBarrier( long __oldValue, long __newValue, volatile long *__theValue );

#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)

bool	OSAtomicCompareAndSwap64( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );
bool	OSAtomicCompareAndSwap64Barrier( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );

#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
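
/* Usage sketch (hypothetical names): the classic compare-and-swap retry loop,
 * here maintaining a running maximum.  The swap fails and the loop retries
 * whenever another thread updated the value between the load and the swap.
 */
__inline static void
ExampleAtomicMax32( int32_t candidate, volatile int32_t *maxValue )
{
	int32_t old;
	do {
		old = *maxValue;
		if (candidate <= old)
			return;		/* current maximum is already large enough */
	} while (!OSAtomicCompareAndSwap32( old, candidate, maxValue ));
}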

/* Test and set.  They return the original value of the bit, and operate on bit (0x80>>(n&7))
 * in byte ((char*)theAddress + (n>>3)).
 */
bool	OSAtomicTestAndSet( uint32_t __n, volatile void *__theAddress );
bool	OSAtomicTestAndSetBarrier( uint32_t __n, volatile void *__theAddress );
bool	OSAtomicTestAndClear( uint32_t __n, volatile void *__theAddress );
bool	OSAtomicTestAndClearBarrier( uint32_t __n, volatile void *__theAddress );
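
/* Usage sketch (hypothetical names): note the numbering above is big-endian
 * within each byte, ie n == 0 is the 0x80 bit of the first byte, and n == 10
 * is the (0x80 >> 2) == 0x20 bit of byte 1.  Here a bitmap tracks slot ownership.
 */
__inline static bool
ExampleClaimSlot( uint32_t slot, volatile void *bitmap )
{
	/* Returns the previous state of the bit: true means another thread
	 * already claimed this slot. */
	return OSAtomicTestAndSetBarrier( slot, bitmap );
}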

/* Spinlocks.  These use memory barriers as required to synchronize access to shared
 * memory protected by the lock.  The lock operation spins, but employs various strategies
 * to back off if the lock is held, making it immune to most priority-inversion livelocks.
 * The try operation immediately returns false if the lock was held, true if it took the
 * lock.  The convention is that unlocked is zero, locked is nonzero.
 */
#define	OS_SPINLOCK_INIT	0

typedef int32_t OSSpinLock;
bool	OSSpinLockTry( volatile OSSpinLock *__lock );
void	OSSpinLockLock( volatile OSSpinLock *__lock );
void	OSSpinLockUnlock( volatile OSSpinLock *__lock );
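
/* Usage sketch (hypothetical names): a lock statically initialized with
 * OS_SPINLOCK_INIT guarding a multi-field update, so both fields change
 * atomically with respect to other threads taking the same lock.
 */
__inline static void
ExampleTransfer( volatile OSSpinLock *lock, int *from, int *to, int amount )
{
	OSSpinLockLock( lock );		/* spins, with backoff, until acquired */
	*from -= amount;
	*to += amount;
	OSSpinLockUnlock( lock );
}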

/* Lockless atomic enqueue and dequeue.  These routines manipulate singly
 * linked LIFO lists.  Ie, a dequeue will return the most recently enqueued
 * element, or NULL if the list is empty.  The "offset" parameter is the offset
 * in bytes of the link field within the data structure being queued.  The
 * link field should be a pointer type.  Memory barriers are incorporated as
 * needed to permit thread-safe access to the queue element.
 */
#if defined(__x86_64__)

typedef volatile struct {
	void	*opaque1;
	long	opaque2;
} OSQueueHead __attribute__ ((aligned (16)));

#else

typedef volatile struct {
	void	*opaque1;
	long	opaque2;
} OSQueueHead;

#endif

#define	OS_ATOMIC_QUEUE_INIT	{ NULL, 0 }

void	OSAtomicEnqueue( OSQueueHead *__list, void *__new, size_t __offset );
void*	OSAtomicDequeue( OSQueueHead *__list, size_t __offset );
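
/* Usage sketch (hypothetical names): the offset parameter is the offsetof()
 * of the pointer-typed link field within the queued structure, and a queue
 * head is initialized with OS_ATOMIC_QUEUE_INIT.
 */
typedef struct ExampleNode {
	struct ExampleNode	*link;		/* link field used by the queue */
	int	payload;
} ExampleNode;

__inline static void
ExamplePush( OSQueueHead *q, ExampleNode *node )
{
	OSAtomicEnqueue( q, node, offsetof(ExampleNode, link) );
}

__inline static ExampleNode *
ExamplePop( OSQueueHead *q )
{
	/* LIFO: returns the most recently enqueued node, or NULL if empty. */
	return (ExampleNode *) OSAtomicDequeue( q, offsetof(ExampleNode, link) );
}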

/* Memory barrier.  It is both a read and write barrier.
 */
void	OSMemoryBarrier( void );
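
/* Usage sketch (hypothetical names): an explicit barrier orders ordinary
 * stores when no barrier-variant atomic op is involved.  A reader would need
 * a matching barrier between loading the flag and loading the data.
 */
__inline static void
ExamplePublishValue( int *data, volatile int *ready )
{
	*data = 123;		/* ordinary store... */
	OSMemoryBarrier();	/* ...completes before the flag store below */
	*ready = 1;
}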

__END_DECLS

#endif /* _OSATOMIC_H_ */