/*
 * Copyright (c) 2004, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <libkern/OSAtomic.h>
#include <arm/arch.h>

#if !defined(_ARM_ARCH_6)

/*
 * The only atomic operation ARMv4T provides (atomic swap) is not
 * sufficient to implement the general 32-bit arithmetic and
 * compare-and-swap operations that OSAtomic is expected to provide,
 * so a global spin lock is used around all operations.
 *
 * Since these systems have only a single, in-order CPU, no data
 * memory barriers are needed.
 */

static OSSpinLock _atomic_lock = OS_SPINLOCK_INIT;

int32_t OSAtomicAdd32( int32_t theAmount, volatile int32_t *theValue )
{
    int32_t result;

    OSSpinLockLock(&_atomic_lock);
    result = (*theValue += theAmount);
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

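/*
 * Because no barriers are needed on this uniprocessor target, every
 * *Barrier entry point below simply calls its non-barrier counterpart.
 */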
int32_t OSAtomicAdd32Barrier( int32_t theAmount, volatile int32_t *theValue )
{
    return OSAtomicAdd32(theAmount, theValue);
}

int64_t OSAtomicAdd64( int64_t theAmount, volatile int64_t *theValue )
{
    int64_t result;

    OSSpinLockLock(&_atomic_lock);
    result = (*theValue += theAmount);
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

int64_t OSAtomicAdd64Barrier( int64_t theAmount, volatile int64_t *theValue )
{
    return OSAtomicAdd64(theAmount, theValue);
}

int32_t OSAtomicOr32( uint32_t theMask, volatile uint32_t *theValue )
{
    int32_t result;

    OSSpinLockLock(&_atomic_lock);
    result = (*theValue |= theMask);
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

int32_t OSAtomicOr32Barrier( uint32_t theMask, volatile uint32_t *theValue )
{
    return OSAtomicOr32(theMask, theValue);
}

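/*
 * The "Orig" variants return the value the word held before the
 * operation, while the plain variants above return the new value.
 */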
int32_t OSAtomicOr32Orig( uint32_t theMask, volatile uint32_t *theValue )
{
    int32_t result;

    OSSpinLockLock(&_atomic_lock);
    result = *theValue;
    *theValue |= theMask;
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

int32_t OSAtomicOr32OrigBarrier( uint32_t theMask, volatile uint32_t *theValue )
{
    return OSAtomicOr32Orig(theMask, theValue);
}

int32_t OSAtomicAnd32( uint32_t theMask, volatile uint32_t *theValue )
{
    int32_t result;

    OSSpinLockLock(&_atomic_lock);
    result = (*theValue &= theMask);
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

int32_t OSAtomicAnd32Barrier( uint32_t theMask, volatile uint32_t *theValue )
{
    return OSAtomicAnd32(theMask, theValue);
}

int32_t OSAtomicAnd32Orig( uint32_t theMask, volatile uint32_t *theValue )
{
    int32_t result;

    OSSpinLockLock(&_atomic_lock);
    result = *theValue;
    *theValue &= theMask;
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

int32_t OSAtomicAnd32OrigBarrier( uint32_t theMask, volatile uint32_t *theValue )
{
    return OSAtomicAnd32Orig(theMask, theValue);
}

int32_t OSAtomicXor32( uint32_t theMask, volatile uint32_t *theValue )
{
    int32_t result;

    OSSpinLockLock(&_atomic_lock);
    result = (*theValue ^= theMask);
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

int32_t OSAtomicXor32Barrier( uint32_t theMask, volatile uint32_t *theValue )
{
    return OSAtomicXor32(theMask, theValue);
}

int32_t OSAtomicXor32Orig( uint32_t theMask, volatile uint32_t *theValue )
{
    int32_t result;

    OSSpinLockLock(&_atomic_lock);
    result = *theValue;
    *theValue ^= theMask;
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

int32_t OSAtomicXor32OrigBarrier( uint32_t theMask, volatile uint32_t *theValue )
{
    return OSAtomicXor32Orig(theMask, theValue);
}

bool OSAtomicCompareAndSwap32( int32_t oldValue, int32_t newValue, volatile int32_t *theValue )
{
    bool result;

    OSSpinLockLock(&_atomic_lock);
    result = (*theValue == oldValue);
    if (result) *theValue = newValue;
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

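/*
 * For illustration only: a hypothetical caller-side retry loop built
 * on the compare-and-swap above ("counter" is a made-up variable name):
 *
 *     volatile int32_t counter = 0;
 *     int32_t old;
 *     do {
 *         old = counter;
 *     } while (!OSAtomicCompareAndSwap32(old, old + 1, &counter));
 */
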
bool OSAtomicCompareAndSwap32Barrier( int32_t oldValue, int32_t newValue, volatile int32_t *theValue )
{
    return OSAtomicCompareAndSwap32(oldValue, newValue, theValue);
}

bool
OSAtomicCompareAndSwapInt(int oldValue, int newValue, volatile int *theValue)
{
    return OSAtomicCompareAndSwap32(oldValue, newValue, theValue);
}

bool
OSAtomicCompareAndSwapIntBarrier(int oldValue, int newValue, volatile int *theValue)
{
    return OSAtomicCompareAndSwap32(oldValue, newValue, theValue);
}

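/*
 * long is 32 bits wide on this ILP32 target, so the long forms can
 * safely delegate to the 32-bit compare-and-swap via a cast.
 */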
bool
OSAtomicCompareAndSwapLong(long oldValue, long newValue, volatile long *theValue)
{
    return OSAtomicCompareAndSwap32(oldValue, newValue, (volatile int32_t *)theValue);
}

bool
OSAtomicCompareAndSwapLongBarrier(long oldValue, long newValue, volatile long *theValue)
{
    return OSAtomicCompareAndSwap32(oldValue, newValue, (volatile int32_t *)theValue);
}

bool OSAtomicCompareAndSwap64( int64_t oldValue, int64_t newValue, volatile int64_t *theValue )
{
    bool result;

    OSSpinLockLock(&_atomic_lock);
    result = (*theValue == oldValue);
    if (result) *theValue = newValue;
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

bool OSAtomicCompareAndSwap64Barrier( int64_t oldValue, int64_t newValue, volatile int64_t *theValue )
{
    return OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
}

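/*
 * Bit n is addressed as bit (0x80 >> (n & 7)) of the byte at
 * ((char *)theAddress + (n >> 3)); that is, bit 0 is the most
 * significant bit of the first byte. Both functions return the
 * previous value of the bit.
 */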
bool OSAtomicTestAndSet( uint32_t n, volatile void *theAddress )
{
    volatile char *byteAddress = ((volatile char *)theAddress + (n>>3));
    uint32_t byteBit = (0x80>>(n&7));
    bool result;

    OSSpinLockLock(&_atomic_lock);
    result = *byteAddress & byteBit;
    *byteAddress |= byteBit;
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

bool OSAtomicTestAndSetBarrier( uint32_t n, volatile void *theAddress )
{
    return OSAtomicTestAndSet(n, theAddress);
}

bool OSAtomicTestAndClear( uint32_t n, volatile void *theAddress )
{
    volatile char *byteAddress = ((volatile char *)theAddress + (n>>3));
    uint32_t byteBit = (0x80>>(n&7));
    bool result;

    OSSpinLockLock(&_atomic_lock);
    result = *byteAddress & byteBit;
    *byteAddress &= (~byteBit);
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

bool OSAtomicTestAndClearBarrier( uint32_t n, volatile void *theAddress )
{
    return OSAtomicTestAndClear(n, theAddress);
}

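/*
 * With a single, in-order CPU there is no memory reordering to fence
 * against, so the barrier is a no-op.
 */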
void OSMemoryBarrier( void )
{
    return;
}

bool OSAtomicCompareAndSwapPtrBarrier( void *__oldValue, void *__newValue, void * volatile *__theValue )
{
    return OSAtomicCompareAndSwapPtr(__oldValue, __newValue, __theValue);
}

bool OSAtomicCompareAndSwapPtr( void *__oldValue, void *__newValue, void * volatile *__theValue )
{
    bool result;

    OSSpinLockLock(&_atomic_lock);
    result = (*__theValue == __oldValue);
    if (result) *__theValue = __newValue;
    OSSpinLockUnlock(&_atomic_lock);

    return result;
}

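/*
 * LIFO queue operations, serialized through the same global lock.
 * __offset is the byte offset of the link-pointer field within each
 * element; opaque1 holds the current head of the list.
 */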
void OSAtomicEnqueue( OSQueueHead *__list, void *__new, size_t __offset )
{
    OSSpinLockLock(&_atomic_lock);
    *((void **)((char *)__new + __offset)) = __list->opaque1;
    __list->opaque1 = __new;
    OSSpinLockUnlock(&_atomic_lock);
}

void* OSAtomicDequeue( OSQueueHead *__list, size_t __offset )
{
    void *head;

    OSSpinLockLock(&_atomic_lock);
    head = __list->opaque1;
    if (head != NULL) {
        void **next = (void **)((char *)head + __offset);
        __list->opaque1 = *next;
    }
    OSSpinLockUnlock(&_atomic_lock);

    return head;
}
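
/*
 * For illustration only, a hypothetical caller (the element type and
 * field names are made up for the example; offsetof needs <stddef.h>):
 *
 *     typedef struct elem {
 *         struct elem *link;
 *         int          data;
 *     } elem_t;
 *
 *     OSQueueHead q = OS_ATOMIC_QUEUE_INIT;
 *     elem_t      e;
 *
 *     OSAtomicEnqueue(&q, &e, offsetof(elem_t, link));
 *     elem_t *p = OSAtomicDequeue(&q, offsetof(elem_t, link));
 */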

#endif /* !defined(_ARM_ARCH_6) */