/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
21 #ifndef __OS_INTERNAL_ATOMIC__
22 #define __OS_INTERNAL_ATOMIC__
24 #ifndef __OS_EXPOSE_INTERNALS_INDIRECT__
26 * Use c11 <stdatomic.h> or c++11 std::atomic from <atomic> instead
28 * XXX /!\ WARNING /!\ XXX
30 * This header file describes INTERNAL interfaces to libplatform used by other
31 * libsystem targets, which are subject to change in future releases of OS X
32 * and iOS. Any applications relying on these interfaces WILL break.
34 * If you are not a libsystem target, you should NOT EVER use these headers.
37 * XXX /!\ WARNING /!\ XXX
39 #error "Please #include <os/internal/internal_shared.h> instead of this file directly."
42 // generate error during codegen
43 #define _os_atomic_unimplemented() \
44 ({ __asm__(".err unimplemented"); })
#pragma mark -
#pragma mark memory_order

// Internal memory-order enum. The first six enumerators mirror the C11
// memory_order values (relaxed..seq_cst); "ordered" and "dependency" are
// libplatform extensions that are aliased onto standard orders below.
typedef enum _os_atomic_memory_order {
	_os_atomic_memory_order_relaxed,
	_os_atomic_memory_order_consume,
	_os_atomic_memory_order_acquire,
	_os_atomic_memory_order_release,
	_os_atomic_memory_order_acq_rel,
	_os_atomic_memory_order_seq_cst,
	_os_atomic_memory_order_ordered,
	_os_atomic_memory_order_dependency,
} _os_atomic_memory_order;
// Map the public os_atomic_* order names onto the internal enum.
// "ordered" is the strongest order (seq_cst); "dependency" is aliased to
// acquire here. When OS_ATOMIC_UP is set, every ordering collapses to
// relaxed (the #endif comment below grounds OS_ATOMIC_UP as the selector;
// presumably a uniprocessor build where hardware ordering is unnecessary).
// NOTE(review): the #if/#else directives were missing in the garbled source
// and have been restored around the two mutually-exclusive definition sets.
#if !OS_ATOMIC_UP
#define os_atomic_memory_order_relaxed    _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_acquire    _os_atomic_memory_order_acquire
#define os_atomic_memory_order_release    _os_atomic_memory_order_release
#define os_atomic_memory_order_acq_rel    _os_atomic_memory_order_acq_rel
#define os_atomic_memory_order_seq_cst    _os_atomic_memory_order_seq_cst
#define os_atomic_memory_order_ordered    _os_atomic_memory_order_seq_cst
#define os_atomic_memory_order_dependency _os_atomic_memory_order_acquire
#else // OS_ATOMIC_UP
#define os_atomic_memory_order_relaxed    _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_acquire    _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_release    _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_acq_rel    _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_seq_cst    _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_ordered    _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_dependency _os_atomic_memory_order_relaxed
#endif // OS_ATOMIC_UP
85 #if !__has_extension(c_atomic)
86 #error "Please use a C11 compiler"
89 #define os_atomic(type) type _Atomic
91 #define _os_atomic_c11_atomic(p) \
92 ((typeof(*(p)) _Atomic *)(p))
94 // This removes the _Atomic and volatile qualifiers on the type of *p
95 #define _os_atomic_basetypeof(p) \
96 typeof(__c11_atomic_load(_os_atomic_c11_atomic(p), \
97 _os_atomic_memory_order_relaxed))
99 #define _os_atomic_baseptr(p) \
100 ((_os_atomic_basetypeof(p) *)(p))
102 #define _os_atomic_barrier(m) \
103 __c11_atomic_thread_fence(os_atomic_memory_order_##m)
// Core atomic operations, implemented on the clang __c11_atomic_* builtins.
// The memory-order parameter `m` is a bare suffix (relaxed, acquire, ...)
// pasted onto os_atomic_memory_order_##m.
#define os_atomic_load(p, m) \
		__c11_atomic_load(_os_atomic_c11_atomic(p), os_atomic_memory_order_##m)
#define os_atomic_store(p, v, m) \
		__c11_atomic_store(_os_atomic_c11_atomic(p), v, \
		os_atomic_memory_order_##m)
#define os_atomic_xchg(p, v, m) \
		__c11_atomic_exchange(_os_atomic_c11_atomic(p), v, \
		os_atomic_memory_order_##m)
// Strong compare-exchange; yields true on success. Failure order is relaxed.
#define os_atomic_cmpxchg(p, e, v, m) \
		({ _os_atomic_basetypeof(p) _r = (e); \
		__c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); })
// Strong compare-exchange that also stores the observed value through g.
// NOTE(review): the trailing "*(g) = _r; _b; })" line was missing in the
// garbled source and has been restored (same for the weak variant below).
#define os_atomic_cmpxchgv(p, e, v, g, m) \
		({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
		__c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
		*(g) = _r; _b; })
// Weak variant: may fail spuriously, intended for use inside retry loops.
#define os_atomic_cmpxchgvw(p, e, v, g, m) \
		({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
		__c11_atomic_compare_exchange_weak(_os_atomic_c11_atomic(p), \
		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
		*(g) = _r; _b; })
// Fetch-op helper: performs the atomic RMW and returns the NEW value
// (old value `op` operand); the _orig variant returns the OLD value.
#define _os_atomic_c11_op(p, v, m, o, op) \
		({ _os_atomic_basetypeof(p) _v = (v), _r = \
		__c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \
		os_atomic_memory_order_##m); (typeof(_r))(_r op _v); })
#define _os_atomic_c11_op_orig(p, v, m, o, op) \
		__c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), v, \
		os_atomic_memory_order_##m)
// Arithmetic/bitwise RMW operations. The plain form returns the new value,
// the _orig form returns the value before the operation.
#define os_atomic_add(p, v, m) \
		_os_atomic_c11_op((p), (v), m, add, +)
#define os_atomic_add_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, add, +)
#define os_atomic_sub(p, v, m) \
		_os_atomic_c11_op((p), (v), m, sub, -)
#define os_atomic_sub_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, sub, -)
#define os_atomic_and(p, v, m) \
		_os_atomic_c11_op((p), (v), m, and, &)
#define os_atomic_and_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, and, &)
#define os_atomic_or(p, v, m) \
		_os_atomic_c11_op((p), (v), m, or, |)
#define os_atomic_or_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, or, |)
#define os_atomic_xor(p, v, m) \
		_os_atomic_c11_op((p), (v), m, xor, ^)
#define os_atomic_xor_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, xor, ^)
// Generic fallback: the dependency token `e` is discarded and the load is
// simply relaxed (architectures with real address-dependency ordering may
// override this elsewhere — not visible in this file).
#define os_atomic_force_dependency_on(p, e) (p)
#define os_atomic_load_with_dependency_on(p, e) \
		os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed)
#define os_atomic_load_with_dependency_on2o(p, f, e) \
		os_atomic_load_with_dependency_on(&(p)->f, e)
#define os_atomic_thread_fence(m) _os_atomic_barrier(m)
// see comment in os_once.c
// Portable default: a seq_cst fence. Overridden with cpuid on x86 below.
#define os_atomic_maximally_synchronizing_barrier() \
		_os_atomic_barrier(seq_cst)
// "2o" (to-offset) variants: operate on struct member f of the struct
// pointed to by p, i.e. on &(p)->f.
#define os_atomic_load2o(p, f, m) \
		os_atomic_load(&(p)->f, m)
#define os_atomic_store2o(p, f, v, m) \
		os_atomic_store(&(p)->f, (v), m)
#define os_atomic_xchg2o(p, f, v, m) \
		os_atomic_xchg(&(p)->f, (v), m)
#define os_atomic_cmpxchg2o(p, f, e, v, m) \
		os_atomic_cmpxchg(&(p)->f, (e), (v), m)
#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \
		os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m)
#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \
		os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m)
#define os_atomic_add2o(p, f, v, m) \
		os_atomic_add(&(p)->f, (v), m)
#define os_atomic_add_orig2o(p, f, v, m) \
		os_atomic_add_orig(&(p)->f, (v), m)
#define os_atomic_sub2o(p, f, v, m) \
		os_atomic_sub(&(p)->f, (v), m)
#define os_atomic_sub_orig2o(p, f, v, m) \
		os_atomic_sub_orig(&(p)->f, (v), m)
#define os_atomic_and2o(p, f, v, m) \
		os_atomic_and(&(p)->f, (v), m)
#define os_atomic_and_orig2o(p, f, v, m) \
		os_atomic_and_orig(&(p)->f, (v), m)
#define os_atomic_or2o(p, f, v, m) \
		os_atomic_or(&(p)->f, (v), m)
#define os_atomic_or_orig2o(p, f, v, m) \
		os_atomic_or_orig(&(p)->f, (v), m)
#define os_atomic_xor2o(p, f, v, m) \
		os_atomic_xor(&(p)->f, (v), m)
#define os_atomic_xor_orig2o(p, f, v, m) \
		os_atomic_xor_orig(&(p)->f, (v), m)
// Increment/decrement conveniences: add/sub of 1 in each flavor.
#define os_atomic_inc(p, m) \
		os_atomic_add((p), 1, m)
#define os_atomic_inc_orig(p, m) \
		os_atomic_add_orig((p), 1, m)
#define os_atomic_inc2o(p, f, m) \
		os_atomic_add2o(p, f, 1, m)
#define os_atomic_inc_orig2o(p, f, m) \
		os_atomic_add_orig2o(p, f, 1, m)
#define os_atomic_dec(p, m) \
		os_atomic_sub((p), 1, m)
#define os_atomic_dec_orig(p, m) \
		os_atomic_sub_orig((p), 1, m)
#define os_atomic_dec2o(p, f, m) \
		os_atomic_sub2o(p, f, 1, m)
#define os_atomic_dec_orig2o(p, f, m) \
		os_atomic_sub_orig2o(p, f, 1, m)
// Generic read-modify-write loop: load *p into ov, run the caller-supplied
// statements (__VA_ARGS__) to compute nv, then publish with a weak cmpxchg,
// retrying until it succeeds. Evaluates to the final _result.
// NOTE(review): the "do {" / "__VA_ARGS__;" and trailing "_result; })" lines
// were missing in the garbled source and have been restored.
#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
		bool _result = false; \
		typeof(p) _p = (p); \
		ov = os_atomic_load(_p, relaxed); \
		do { \
			__VA_ARGS__; \
			_result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
		} while (os_unlikely(!_result)); \
		_result; \
	})
#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \
		os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__)
// Abandon an rmw loop from inside __VA_ARGS__: issue a fence, evaluate expr
// (expected to transfer control, e.g. return/goto), never fall through.
#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
		({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
#define os_atomic_rmw_loop_give_up(expr) \
		os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)
// "tsx" entry points are plain acquire/release aliases here (presumably
// overridden with Intel TSX hinted variants elsewhere — verify in callers).
#define os_atomic_tsx_xacq_cmpxchgv(p, e, v, g) \
		os_atomic_cmpxchgv((p), (e), (v), (g), acquire)
#define os_atomic_tsx_xrel_store(p, v) \
		os_atomic_store(p, v, release)
#define os_atomic_tsx_xacq_cmpxchgv2o(p, f, e, v, g) \
		os_atomic_tsx_xacq_cmpxchgv(&(p)->f, (e), (v), (g))
#define os_atomic_tsx_xrel_store2o(p, f, v) \
		os_atomic_tsx_xrel_store(&(p)->f, (v))
245 #if defined(__x86_64__) || defined(__i386__)
249 #undef os_atomic_maximally_synchronizing_barrier
251 #define os_atomic_maximally_synchronizing_barrier() \
252 ({ unsigned long _clbr; __asm__ __volatile__( \
254 : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); })
257 #define os_atomic_maximally_synchronizing_barrier() \
258 ({ unsigned long _clbr; __asm__ __volatile__( \
260 : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); })
261 #else // gcc does not allow inline i386 asm to clobber ebx
262 #define os_atomic_maximally_synchronizing_barrier() \
263 ({ unsigned long _clbr; __asm__ __volatile__( \
267 : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); })
275 #endif // __OS_EXPOSE_INTERNALS_INDIRECT__
277 #endif // __OS_ATOMIC__