/*
 * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/OSAtomic.h>
#include <kern/debug.h>
#include <machine/atomic.h>

#ifndef NULL
#define NULL ((void *)0)
#endif

#define ATOMIC_DEBUG DEBUG

#if ATOMIC_DEBUG
#define ALIGN_TEST(p, t) do{if((uintptr_t)p&(sizeof(t)-1)) panic("Unaligned atomic pointer %p\n",p);}while(0)
#else
#define ALIGN_TEST(p, t) do{}while(0)
#endif

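/*
 * Worked example of the alignment check above, with a hypothetical UInt32
 * pointer: sizeof(UInt32) - 1 is 0x3, so p == 0x1000 passes the test
 * (0x1000 & 0x3 == 0) while p == 0x1002 panics under ATOMIC_DEBUG
 * (0x1002 & 0x3 == 0x2).
 */
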
// 19831745 - start of big hammer!
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"

/*
 * These are _the_ atomic operations, now implemented via compiler built-ins.
 * It is expected that this C implementation is a candidate for Link-Time-
 * Optimization inlining, whereas the assembler implementations they replace
 * are not.
 */

#undef OSCompareAndSwap8
Boolean
OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address)
{
	return __c11_atomic_compare_exchange_strong((_Atomic UInt8 *)address, &oldValue, newValue,
	           memory_order_acq_rel_smp, memory_order_relaxed);
}

#undef OSCompareAndSwap16
Boolean
OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address)
{
	return __c11_atomic_compare_exchange_strong((_Atomic UInt16 *)address, &oldValue, newValue,
	           memory_order_acq_rel_smp, memory_order_relaxed);
}

#undef OSCompareAndSwap
Boolean
OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address)
{
	ALIGN_TEST(address, UInt32);
	return __c11_atomic_compare_exchange_strong((_Atomic UInt32 *)address, &oldValue, newValue,
	           memory_order_acq_rel_smp, memory_order_relaxed);
}

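/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * usual compare-and-swap retry loop built on OSCompareAndSwap, here raising
 * a shared 32-bit high-water mark.  OSCompareAndSwap returns true only if
 * *address still held oldValue and the store succeeded.
 *
 *	static void
 *	example_store_max(volatile UInt32 *mark, UInt32 candidate)
 *	{
 *		UInt32 observed;
 *
 *		do {
 *			observed = *mark;
 *			if (candidate <= observed) {
 *				return;		// another thread already stored a larger value
 *			}
 *		} while (!OSCompareAndSwap(observed, candidate, mark));
 *	}
 */
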
#undef OSCompareAndSwap64
Boolean
OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address)
{
	/*
	 * _Atomic uint64 requires 8-byte alignment on all architectures.
	 * This silences the compiler cast warning.  ALIGN_TEST() verifies
	 * that the cast was legal, if defined.
	 */
	_Atomic UInt64 *aligned_addr = (_Atomic UInt64 *)(uintptr_t)address;

	ALIGN_TEST(address, UInt64);
	return __c11_atomic_compare_exchange_strong(aligned_addr, &oldValue, newValue,
	           memory_order_acq_rel_smp, memory_order_relaxed);
}

#undef OSCompareAndSwapPtr
Boolean
OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address)
{
#if defined(__LP64__)
	return OSCompareAndSwap64((UInt64)oldValue, (UInt64)newValue, (volatile UInt64 *)address);
#else
	return OSCompareAndSwap((UInt32)oldValue, (UInt32)newValue, (volatile UInt32 *)address);
#endif
}

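/*
 * Illustrative sketch (hypothetical types and helper, not part of this
 * file): OSCompareAndSwapPtr is the natural primitive for a lock-free
 * singly linked push, retrying until the head pointer is swapped in
 * atomically.
 *
 *	struct example_node {
 *		struct example_node *next;
 *	};
 *
 *	static void
 *	example_push(struct example_node * volatile *head, struct example_node *node)
 *	{
 *		do {
 *			node->next = *head;
 *		} while (!OSCompareAndSwapPtr(node->next, node, (void * volatile *)head));
 *	}
 */
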
SInt8
OSAddAtomic8(SInt32 amount, volatile SInt8 *address)
{
	return __c11_atomic_fetch_add((_Atomic SInt8 *)address, amount, memory_order_relaxed);
}

SInt16
OSAddAtomic16(SInt32 amount, volatile SInt16 *address)
{
	return __c11_atomic_fetch_add((_Atomic SInt16 *)address, amount, memory_order_relaxed);
}

#undef OSAddAtomic
SInt32
OSAddAtomic(SInt32 amount, volatile SInt32 *address)
{
	ALIGN_TEST(address, UInt32);
	return __c11_atomic_fetch_add((_Atomic SInt32 *)address, amount, memory_order_relaxed);
}

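/*
 * Illustrative sketch (hypothetical counter, not part of this file): the
 * fetch_add built-in returns the value the location held before the add,
 * so OSAddAtomic can hand out unique, monotonically increasing tickets.
 *
 *	static SInt32 example_next_ticket;
 *
 *	static SInt32
 *	example_take_ticket(void)
 *	{
 *		return OSAddAtomic(1, &example_next_ticket);	// returns the pre-increment value
 *	}
 */
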
#undef OSAddAtomic64
SInt64
OSAddAtomic64(SInt64 amount, volatile SInt64 *address)
{
	_Atomic SInt64 *aligned_address = (_Atomic SInt64 *)(uintptr_t)address;

	ALIGN_TEST(address, SInt64);
	return __c11_atomic_fetch_add(aligned_address, amount, memory_order_relaxed);
}

#undef OSAddAtomicLong
long
OSAddAtomicLong(long theAmount, volatile long *address)
{
#if defined(__LP64__)
	return (long)OSAddAtomic64((SInt64)theAmount, (SInt64 *)address);
#else
	return (long)OSAddAtomic((SInt32)theAmount, address);
#endif
}

#undef OSIncrementAtomic
SInt32
OSIncrementAtomic(volatile SInt32 * value)
{
	return OSAddAtomic(1, value);
}

#undef OSDecrementAtomic
SInt32
OSDecrementAtomic(volatile SInt32 * value)
{
	return OSAddAtomic(-1, value);
}

#undef OSBitAndAtomic
UInt32
OSBitAndAtomic(UInt32 mask, volatile UInt32 * value)
{
	return __c11_atomic_fetch_and((_Atomic UInt32 *)value, mask, memory_order_relaxed);
}

#undef OSBitOrAtomic
UInt32
OSBitOrAtomic(UInt32 mask, volatile UInt32 * value)
{
	return __c11_atomic_fetch_or((_Atomic UInt32 *)value, mask, memory_order_relaxed);
}

#undef OSBitXorAtomic
UInt32
OSBitXorAtomic(UInt32 mask, volatile UInt32 * value)
{
	return __c11_atomic_fetch_xor((_Atomic UInt32 *)value, mask, memory_order_relaxed);
}

static Boolean
OSTestAndSetClear(UInt32 bit, Boolean wantSet, volatile UInt8 * startAddress)
{
	UInt8           mask = 1;
	UInt8           oldValue, wantValue;

	startAddress += (bit / 8);
	mask <<= (7 - (bit % 8));
	wantValue = wantSet ? mask : 0;

	do {
		oldValue = *startAddress;
		if ((oldValue & mask) == wantValue) {
			break;
		}
	} while (!__c11_atomic_compare_exchange_strong((_Atomic UInt8 *)startAddress,
	    &oldValue, (oldValue & ~mask) | wantValue, memory_order_relaxed, memory_order_relaxed));

	return (oldValue & mask) == wantValue;
}

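/*
 * Worked example of the bit addressing above, assuming a hypothetical call
 * OSTestAndSet(9, base): bit / 8 == 1 selects base[1], and
 * mask == 1 << (7 - (9 % 8)) == 0x40, so bit 0 is the most significant bit
 * of the first byte and bit numbering runs MSB-first within each byte.
 */
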
Boolean
OSTestAndSet(UInt32 bit, volatile UInt8 * startAddress)
{
	return OSTestAndSetClear(bit, true, startAddress);
}

Boolean
OSTestAndClear(UInt32 bit, volatile UInt8 * startAddress)
{
	return OSTestAndSetClear(bit, false, startAddress);
}

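/*
 * Illustrative sketch (hypothetical flag, not part of this file):
 * OSTestAndSet returns the previous state of the bit, so it can gate a
 * one-time initialization without a lock.
 *
 *	static UInt8 example_flags;
 *
 *	static void
 *	example_init_once(void (*init)(void))
 *	{
 *		if (!OSTestAndSet(0, &example_flags)) {
 *			init();		// only the first caller sees the bit clear
 *		}
 *	}
 */
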
/*
 * silly unaligned versions
 */

SInt8
OSIncrementAtomic8(volatile SInt8 * value)
{
	return OSAddAtomic8(1, value);
}

SInt8
OSDecrementAtomic8(volatile SInt8 * value)
{
	return OSAddAtomic8(-1, value);
}

UInt8
OSBitAndAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return __c11_atomic_fetch_and((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}

UInt8
OSBitOrAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return __c11_atomic_fetch_or((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}

UInt8
OSBitXorAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return __c11_atomic_fetch_xor((_Atomic UInt8 *)value, mask, memory_order_relaxed);
}

SInt16
OSIncrementAtomic16(volatile SInt16 * value)
{
	return OSAddAtomic16(1, value);
}

SInt16
OSDecrementAtomic16(volatile SInt16 * value)
{
	return OSAddAtomic16(-1, value);
}

UInt16
OSBitAndAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return __c11_atomic_fetch_and((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}

UInt16
OSBitOrAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return __c11_atomic_fetch_or((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}

UInt16
OSBitXorAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return __c11_atomic_fetch_xor((_Atomic UInt16 *)value, mask, memory_order_relaxed);
}

// 19831745 - end of big hammer!
#pragma clang diagnostic pop