/*
 * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/OSAtomic.h>
#include <kern/debug.h>
#include <machine/atomic.h>

#ifndef NULL
#define NULL ((void *)0)
#endif

#define ATOMIC_DEBUG DEBUG

#if ATOMIC_DEBUG
#define ALIGN_TEST(p, t) do{if((uintptr_t)p&(sizeof(t)-1)) panic("Unaligned atomic pointer %p\n",p);}while(0)
#else
#define ALIGN_TEST(p, t) do{}while(0)
#endif

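/*
 * When ATOMIC_DEBUG is set, ALIGN_TEST() panics on a misaligned operand
 * rather than letting the access proceed; in non-DEBUG builds it compiles
 * away entirely.
 */
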
/*
 * These are _the_ atomic operations, now implemented via compiler built-ins.
 * It is expected that this C implementation is a candidate for Link-Time-
 * Optimization inlining, whereas the assembler implementations they replace
 * were not.
 */

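/*
 * Ordering convention used below: the compare-and-swap family uses acq_rel,
 * so a successful exchange both acquires and releases, while the arithmetic
 * and bitwise read-modify-write operations use relaxed ordering and provide
 * atomicity only.
 */
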
#undef OSCompareAndSwap8
Boolean
OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address)
{
	return os_atomic_cmpxchg(address, oldValue, newValue, acq_rel);
}

#undef OSCompareAndSwap16
Boolean
OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address)
{
	return os_atomic_cmpxchg(address, oldValue, newValue, acq_rel);
}

#undef OSCompareAndSwap
Boolean
OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address)
{
	ALIGN_TEST(address, UInt32);
	return os_atomic_cmpxchg(address, oldValue, newValue, acq_rel);
}

#undef OSCompareAndSwap64
Boolean
OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address)
{
	/*
	 * _Atomic uint64 requires 8-byte alignment on all architectures.
	 * This silences the compiler cast warning.  ALIGN_TEST() verifies
	 * that the cast was legal, if defined.
	 */
	_Atomic UInt64 *aligned_addr = (_Atomic UInt64 *)(uintptr_t)address;

	ALIGN_TEST(address, UInt64);
	return os_atomic_cmpxchg(aligned_addr, oldValue, newValue, acq_rel);
}

#undef OSCompareAndSwapPtr
Boolean
OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address)
{
	return os_atomic_cmpxchg(address, oldValue, newValue, acq_rel);
}

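/*
 * Typical caller pattern (an illustrative sketch, not part of this file):
 * retry until the value observed is the value swapped out, e.g. to set the
 * high bit of a shared word:
 *
 *	UInt32 old, new;
 *	do {
 *		old = *counter;
 *		new = old | 0x80000000;
 *	} while (!OSCompareAndSwap(old, new, counter));
 */
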
SInt8
OSAddAtomic8(SInt32 amount, volatile SInt8 *address)
{
	return os_atomic_add_orig(address, amount, relaxed);
}

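/*
 * The os_atomic_*_orig forms return the value the target held before the
 * operation, so each OSAddAtomic* variant reports the previous value, as
 * the OSAtomic interfaces document.
 */
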
SInt16
OSAddAtomic16(SInt32 amount, volatile SInt16 *address)
{
	return os_atomic_add_orig(address, amount, relaxed);
}

#undef OSAddAtomic
SInt32
OSAddAtomic(SInt32 amount, volatile SInt32 *address)
{
	ALIGN_TEST(address, UInt32);
	return os_atomic_add_orig(address, amount, relaxed);
}

#undef OSAddAtomic64
SInt64
OSAddAtomic64(SInt64 amount, volatile SInt64 *address)
{
	_Atomic SInt64 *aligned_address = (_Atomic SInt64 *)(uintptr_t)address;

	ALIGN_TEST(address, SInt64);
	return os_atomic_add_orig(aligned_address, amount, relaxed);
}

#undef OSAddAtomicLong
long
OSAddAtomicLong(long theAmount, volatile long *address)
{
	return os_atomic_add_orig(address, theAmount, relaxed);
}

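/*
 * Note: assuming the usual ILP32/LP64 data models, long matches the pointer
 * width, so OSAddAtomicLong is the natural-word-width add on either kernel.
 */
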
#undef OSIncrementAtomic
SInt32
OSIncrementAtomic(volatile SInt32 * value)
{
	return os_atomic_inc_orig(value, relaxed);
}

#undef OSDecrementAtomic
SInt32
OSDecrementAtomic(volatile SInt32 * value)
{
	return os_atomic_dec_orig(value, relaxed);
}

#undef OSBitAndAtomic
UInt32
OSBitAndAtomic(UInt32 mask, volatile UInt32 * value)
{
	return os_atomic_and_orig(value, mask, relaxed);
}

#undef OSBitOrAtomic
UInt32
OSBitOrAtomic(UInt32 mask, volatile UInt32 * value)
{
	return os_atomic_or_orig(value, mask, relaxed);
}

#undef OSBitXorAtomic
UInt32
OSBitXorAtomic(UInt32 mask, volatile UInt32 * value)
{
	return os_atomic_xor_orig(value, mask, relaxed);
}

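/*
 * Bits are numbered from the most significant bit of the first byte
 * (mask <<= 7 - (bit % 8)), and the rmw loop gives up once the bit already
 * holds the wanted value, so the function returns true exactly when the bit
 * was already in the requested state.
 */
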
static Boolean
OSTestAndSetClear(UInt32 bit, Boolean wantSet, volatile UInt8 * startAddress)
{
	UInt8		mask = 1;
	UInt8		oldValue, newValue;
	UInt8		wantValue;
	UInt8		*address;

	address = (UInt8 *)(uintptr_t)(startAddress + (bit / 8));
	mask <<= (7 - (bit % 8));
	wantValue = wantSet ? mask : 0;

	return !os_atomic_rmw_loop(address, oldValue, newValue, relaxed, {
		if ((oldValue & mask) == wantValue) {
			os_atomic_rmw_loop_give_up(break);
		}
		newValue = (oldValue & ~mask) | wantValue;
	});
}

Boolean
OSTestAndSet(UInt32 bit, volatile UInt8 * startAddress)
{
	return OSTestAndSetClear(bit, true, startAddress);
}

Boolean
OSTestAndClear(UInt32 bit, volatile UInt8 * startAddress)
{
	return OSTestAndSetClear(bit, false, startAddress);
}

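/*
 * Illustrative use (a sketch, not part of this file): claiming a flag bit
 * exactly once, where only the caller that actually flipped the bit wins:
 *
 *	if (!OSTestAndSet(0, &flags)) {
 *		// bit 0 was clear and is now set; this caller won the race
 *	}
 */
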
/*
 * silly unaligned versions
 */

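/*
 * The 8- and 16-bit variants below accept a UInt32 mask for interface
 * compatibility; only the low-order bits that fit the operand type take
 * effect, since the value is truncated to the operand's width.
 */
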
SInt8
OSIncrementAtomic8(volatile SInt8 * value)
{
	return os_atomic_inc_orig(value, relaxed);
}

SInt8
OSDecrementAtomic8(volatile SInt8 * value)
{
	return os_atomic_dec_orig(value, relaxed);
}

UInt8
OSBitAndAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return os_atomic_and_orig(value, mask, relaxed);
}

UInt8
OSBitOrAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return os_atomic_or_orig(value, mask, relaxed);
}

UInt8
OSBitXorAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return os_atomic_xor_orig(value, mask, relaxed);
}

SInt16
OSIncrementAtomic16(volatile SInt16 * value)
{
	return OSAddAtomic16(1, value);
}

SInt16
OSDecrementAtomic16(volatile SInt16 * value)
{
	return OSAddAtomic16(-1, value);
}

UInt16
OSBitAndAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return os_atomic_and_orig(value, mask, relaxed);
}

UInt16
OSBitOrAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return os_atomic_or_orig(value, mask, relaxed);
}

UInt16
OSBitXorAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return os_atomic_xor_orig(value, mask, relaxed);
}