/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __OS_INTERNAL_ATOMIC__
#define __OS_INTERNAL_ATOMIC__

#ifndef __OS_EXPOSE_INTERNALS_INDIRECT__
/*
 * Use C11 <stdatomic.h> or C++11 std::atomic from <atomic> instead.
 *
 * XXX /!\ WARNING /!\ XXX
 *
 * This header file describes INTERNAL interfaces to libplatform used by other
 * libsystem targets, which are subject to change in future releases of OS X
 * and iOS. Any applications relying on these interfaces WILL break.
 *
 * If you are not a libsystem target, you should NOT EVER use these headers.
 * Not even a little.
 *
 * XXX /!\ WARNING /!\ XXX
 */
#error "Please #include <os/internal/internal_shared.h> instead of this file directly."
#else

// Generates an assembler error at codegen time if an unimplemented
// operation is ever instantiated
#define _os_atomic_unimplemented() \
		({ __asm__(".err unimplemented"); })

#pragma mark -
#pragma mark memory_order

typedef enum _os_atomic_memory_order {
	_os_atomic_memory_order_relaxed,
	_os_atomic_memory_order_consume,
	_os_atomic_memory_order_acquire,
	_os_atomic_memory_order_release,
	_os_atomic_memory_order_acq_rel,
	_os_atomic_memory_order_seq_cst,
	_os_atomic_memory_order_ordered,
	_os_atomic_memory_order_dependency,
} _os_atomic_memory_order;

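/*
 * Note: `ordered` and `dependency` are libplatform extensions beyond the six
 * C11 orderings; the mappings below fold them onto C11 orders (seq_cst and
 * acquire respectively on SMP builds).
 */
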
#if !OS_ATOMIC_UP

#define os_atomic_memory_order_relaxed _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_acquire _os_atomic_memory_order_acquire
#define os_atomic_memory_order_release _os_atomic_memory_order_release
#define os_atomic_memory_order_acq_rel _os_atomic_memory_order_acq_rel
#define os_atomic_memory_order_seq_cst _os_atomic_memory_order_seq_cst
#define os_atomic_memory_order_ordered _os_atomic_memory_order_seq_cst
#define os_atomic_memory_order_dependency _os_atomic_memory_order_acquire

#else // OS_ATOMIC_UP

#define os_atomic_memory_order_relaxed _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_acquire _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_release _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_acq_rel _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_seq_cst _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_ordered _os_atomic_memory_order_relaxed
#define os_atomic_memory_order_dependency _os_atomic_memory_order_relaxed

#endif // OS_ATOMIC_UP

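/*
 * On uniprocessor (OS_ATOMIC_UP) builds every ordering collapses to relaxed:
 * with a single CPU there is no other observer to order against, so only
 * atomicity, not inter-CPU ordering, is required.
 */
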
#pragma mark -
#pragma mark c11

#if !__has_extension(c_atomic)
#error "Please use a C11 compiler"
#endif

#define os_atomic(type) type _Atomic

#define _os_atomic_c11_atomic(p) \
		((typeof(*(p)) _Atomic *)(p))

// This removes the _Atomic and volatile qualifiers on the type of *p
#define _os_atomic_basetypeof(p) \
		typeof(__c11_atomic_load(_os_atomic_c11_atomic(p), \
		_os_atomic_memory_order_relaxed))

#define _os_atomic_baseptr(p) \
		((_os_atomic_basetypeof(p) *)(p))

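/*
 * Illustrative sketch (not part of this header): given
 *	os_atomic(uint32_t) volatile counter;
 * _os_atomic_basetypeof(&counter) names plain uint32_t, because the result
 * type of __c11_atomic_load carries neither _Atomic nor volatile.
 */
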
#define _os_atomic_barrier(m) \
		__c11_atomic_thread_fence(os_atomic_memory_order_##m)
#define os_atomic_load(p, m) \
		__c11_atomic_load(_os_atomic_c11_atomic(p), os_atomic_memory_order_##m)
#define os_atomic_store(p, v, m) \
		__c11_atomic_store(_os_atomic_c11_atomic(p), v, \
		os_atomic_memory_order_##m)
#define os_atomic_xchg(p, v, m) \
		__c11_atomic_exchange(_os_atomic_c11_atomic(p), v, \
		os_atomic_memory_order_##m)
#define os_atomic_cmpxchg(p, e, v, m) \
		({ _os_atomic_basetypeof(p) _r = (e); \
		__c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); })
#define os_atomic_cmpxchgv(p, e, v, g, m) \
		({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
		__c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
		*(g) = _r; _b; })
#define os_atomic_cmpxchgvw(p, e, v, g, m) \
		({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
		__c11_atomic_compare_exchange_weak(_os_atomic_c11_atomic(p), \
		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
		*(g) = _r; _b; })
#define _os_atomic_c11_op(p, v, m, o, op) \
		({ _os_atomic_basetypeof(p) _v = (v), _r = \
		__c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \
		os_atomic_memory_order_##m); (typeof(_r))(_r op _v); })
#define _os_atomic_c11_op_orig(p, v, m, o, op) \
		__c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), v, \
		os_atomic_memory_order_##m)

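/*
 * Illustrative sketch (assumed caller code, not part of this header):
 * cmpxchg only reports success or failure, while cmpxchgv additionally
 * stores the value actually observed into *g, saving a reload on retry:
 *
 *	os_atomic(uint32_t) val;
 *	uint32_t cur = os_atomic_load(&val, relaxed);
 *	while (!os_atomic_cmpxchgv(&val, cur, cur + 1, &cur, relaxed)) {
 *		// `cur` was refreshed by cmpxchgv; just try again
 *	}
 */
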
#define os_atomic_add(p, v, m) \
		_os_atomic_c11_op((p), (v), m, add, +)
#define os_atomic_add_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, add, +)
#define os_atomic_sub(p, v, m) \
		_os_atomic_c11_op((p), (v), m, sub, -)
#define os_atomic_sub_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, sub, -)
#define os_atomic_and(p, v, m) \
		_os_atomic_c11_op((p), (v), m, and, &)
#define os_atomic_and_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, and, &)
#define os_atomic_or(p, v, m) \
		_os_atomic_c11_op((p), (v), m, or, |)
#define os_atomic_or_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, or, |)
#define os_atomic_xor(p, v, m) \
		_os_atomic_c11_op((p), (v), m, xor, ^)
#define os_atomic_xor_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, xor, ^)

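/*
 * Illustrative sketch (not part of this header): the plain ops yield the
 * value after the operation, the _orig variants the value it replaced:
 *
 *	os_atomic(int) refcnt;
 *	int nw = os_atomic_add(&refcnt, 1, relaxed);       // incremented value
 *	int ov = os_atomic_add_orig(&refcnt, 1, relaxed);  // prior value
 */
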
#define os_atomic_force_dependency_on(p, e) (p)
#define os_atomic_load_with_dependency_on(p, e) \
		os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed)
#define os_atomic_load_with_dependency_on2o(p, f, e) \
		os_atomic_load_with_dependency_on(&(p)->f, e)

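/*
 * Note: this generic os_atomic_force_dependency_on() simply discards the
 * dependency token `e`, so the relaxed load above carries no extra ordering
 * here; a port relying on hardware address dependencies would need to
 * substitute a real dependency-forcing implementation.
 */
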
#pragma mark -
#pragma mark generic

#define os_atomic_thread_fence(m) _os_atomic_barrier(m)
// see comment in os_once.c
#define os_atomic_maximally_synchronizing_barrier() \
		_os_atomic_barrier(seq_cst)

#define os_atomic_load2o(p, f, m) \
		os_atomic_load(&(p)->f, m)
#define os_atomic_store2o(p, f, v, m) \
		os_atomic_store(&(p)->f, (v), m)
#define os_atomic_xchg2o(p, f, v, m) \
		os_atomic_xchg(&(p)->f, (v), m)
#define os_atomic_cmpxchg2o(p, f, e, v, m) \
		os_atomic_cmpxchg(&(p)->f, (e), (v), m)
#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \
		os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m)
#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \
		os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m)
#define os_atomic_add2o(p, f, v, m) \
		os_atomic_add(&(p)->f, (v), m)
#define os_atomic_add_orig2o(p, f, v, m) \
		os_atomic_add_orig(&(p)->f, (v), m)
#define os_atomic_sub2o(p, f, v, m) \
		os_atomic_sub(&(p)->f, (v), m)
#define os_atomic_sub_orig2o(p, f, v, m) \
		os_atomic_sub_orig(&(p)->f, (v), m)
#define os_atomic_and2o(p, f, v, m) \
		os_atomic_and(&(p)->f, (v), m)
#define os_atomic_and_orig2o(p, f, v, m) \
		os_atomic_and_orig(&(p)->f, (v), m)
#define os_atomic_or2o(p, f, v, m) \
		os_atomic_or(&(p)->f, (v), m)
#define os_atomic_or_orig2o(p, f, v, m) \
		os_atomic_or_orig(&(p)->f, (v), m)
#define os_atomic_xor2o(p, f, v, m) \
		os_atomic_xor(&(p)->f, (v), m)
#define os_atomic_xor_orig2o(p, f, v, m) \
		os_atomic_xor_orig(&(p)->f, (v), m)

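/*
 * The "2o" suffix reads "to offset": os_atomic_load2o(p, f, m) is exactly
 * os_atomic_load(&(p)->f, m), and likewise for every variant above.
 */
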
#define os_atomic_inc(p, m) \
		os_atomic_add((p), 1, m)
#define os_atomic_inc_orig(p, m) \
		os_atomic_add_orig((p), 1, m)
#define os_atomic_inc2o(p, f, m) \
		os_atomic_add2o(p, f, 1, m)
#define os_atomic_inc_orig2o(p, f, m) \
		os_atomic_add_orig2o(p, f, 1, m)
#define os_atomic_dec(p, m) \
		os_atomic_sub((p), 1, m)
#define os_atomic_dec_orig(p, m) \
		os_atomic_sub_orig((p), 1, m)
#define os_atomic_dec2o(p, f, m) \
		os_atomic_sub2o(p, f, 1, m)
#define os_atomic_dec_orig2o(p, f, m) \
		os_atomic_sub_orig2o(p, f, 1, m)

#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
		bool _result = false; \
		typeof(p) _p = (p); \
		ov = os_atomic_load(_p, relaxed); \
		do { \
			__VA_ARGS__; \
			_result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
		} while (os_unlikely(!_result)); \
		_result; \
	})
#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \
		os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__)
#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
		({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
#define os_atomic_rmw_loop_give_up(expr) \
		os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)

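/*
 * Illustrative usage sketch (assumed caller code; LOCKED_BIT is a
 * hypothetical flag, not part of this header):
 *
 *	uint32_t ov, nv;
 *	os_atomic_rmw_loop(&val, ov, nv, acquire, {
 *		if (ov & LOCKED_BIT) {
 *			os_atomic_rmw_loop_give_up(return false);
 *		}
 *		nv = ov | LOCKED_BIT;
 *	});
 *	return true;
 *
 * The weak cmpxchg refreshes `ov` on every failed attempt, so the body
 * always recomputes `nv` from the latest value; give_up() exits the loop
 * without publishing a store.
 */
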
#define os_atomic_tsx_xacq_cmpxchgv(p, e, v, g) \
		os_atomic_cmpxchgv((p), (e), (v), (g), acquire)
#define os_atomic_tsx_xrel_store(p, v) \
		os_atomic_store(p, v, release)
#define os_atomic_tsx_xacq_cmpxchgv2o(p, f, e, v, g) \
		os_atomic_tsx_xacq_cmpxchgv(&(p)->f, (e), (v), (g))
#define os_atomic_tsx_xrel_store2o(p, f, v) \
		os_atomic_tsx_xrel_store(&(p)->f, (v))

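/*
 * The tsx_* names follow Intel TSX XACQUIRE/XRELEASE terminology; in this
 * generic implementation they reduce to an ordinary acquire cmpxchg and a
 * release store.
 */
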
#if defined(__x86_64__) || defined(__i386__)
#pragma mark -
#pragma mark x86
#undef os_atomic_maximally_synchronizing_barrier
#ifdef __LP64__
#define os_atomic_maximally_synchronizing_barrier() \
		({ unsigned long _clbr; __asm__ __volatile__( \
		"cpuid" \
		: "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); })
#else
#ifdef __llvm__
#define os_atomic_maximally_synchronizing_barrier() \
		({ unsigned long _clbr; __asm__ __volatile__( \
		"cpuid" \
		: "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); })
#else // gcc does not allow i386 inline asm to clobber ebx (the PIC register)
#define os_atomic_maximally_synchronizing_barrier() \
		({ unsigned long _clbr; __asm__ __volatile__( \
		"pushl %%ebx\n\t" \
		"cpuid\n\t" \
		"popl %%ebx" \
		: "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); })
#endif
#endif

#endif // defined(__x86_64__) || defined(__i386__)

#endif // __OS_EXPOSE_INTERNALS_INDIRECT__

#endif // __OS_INTERNAL_ATOMIC__