/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */

#ifndef __DISPATCH_SHIMS_ATOMIC__
#define __DISPATCH_SHIMS_ATOMIC__

#if !__has_extension(c_atomic) || \
		!__has_extension(c_generic_selections) || \
		!__has_include(<stdatomic.h>)
#error libdispatch requires C11 with <stdatomic.h> and generic selections
#endif

#include <stdatomic.h>

#define memory_order_ordered memory_order_seq_cst
#define memory_order_dependency memory_order_acquire

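/*
 * The memory_order_ordered and memory_order_dependency aliases above map
 * libdispatch's ordering names onto standard C11 memory orders: "ordered"
 * is sequentially consistent, and "dependency" is conservatively treated
 * as acquire in this portable C11 shim.
 */
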
#if __has_extension(c_generic_selections) && __has_extension(c_atomic)
#define os_atomic(type) _Atomic(type)
#else
#define os_atomic(type) type volatile
#endif

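/*
 * Note: the #error check at the top of this header already requires both
 * c_atomic and c_generic_selections, so os_atomic(type) effectively always
 * expands to _Atomic(type); the volatile definition appears to be kept
 * only as a fallback.
 */
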
#define _os_atomic_type_cases(type, expr) \
		type *: expr, \
		type volatile *: expr, \
		_Atomic(type) *: expr, \
		_Atomic(type) volatile *: expr

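/*
 * _os_atomic_type_cases() above expands to one _Generic association per
 * qualified variant of "type" (plain, volatile, _Atomic, and _Atomic
 * volatile pointers), so the os_atomic_* macros below accept both plain
 * and _Atomic-qualified lvalues.
 */
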
#define _os_atomic_basetypeof(p) \
		typeof(*_Generic((p), \
		_os_atomic_type_cases(char, (char *)(p)), \
		_os_atomic_type_cases(signed char, (signed char *)(p)), \
		_os_atomic_type_cases(unsigned char, (unsigned char *)(p)), \
		_os_atomic_type_cases(short, (short *)(p)), \
		_os_atomic_type_cases(unsigned short, (unsigned short *)(p)), \
		_os_atomic_type_cases(int, (int *)(p)), \
		_os_atomic_type_cases(unsigned int, (unsigned int *)(p)), \
		_os_atomic_type_cases(long, (long *)(p)), \
		_os_atomic_type_cases(unsigned long, (unsigned long *)(p)), \
		_os_atomic_type_cases(long long, (long long *)(p)), \
		_os_atomic_type_cases(unsigned long long, (unsigned long long *)(p)), \
		_os_atomic_type_cases(void *, (void **)(p)), \
		_os_atomic_type_cases(const void *, (const void **)(p)), \
		default: (void**)(p)))

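/*
 * _os_atomic_basetypeof(p) above yields the plain (non-atomic) type that
 * "p" points to, e.g. int for an _Atomic(int) *; it is used below to
 * declare temporaries of the value type expected by the
 * atomic_*_explicit() generic functions.
 */
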
#define _os_atomic_c11_atomic(p) \
		_Generic((p), \
		_os_atomic_type_cases(char, (_Atomic(char)*)(p)), \
		_os_atomic_type_cases(signed char, (_Atomic(signed char)*)(p)), \
		_os_atomic_type_cases(unsigned char, (_Atomic(unsigned char)*)(p)), \
		_os_atomic_type_cases(short, (_Atomic(short)*)(p)), \
		_os_atomic_type_cases(unsigned short, (_Atomic(unsigned short)*)(p)), \
		_os_atomic_type_cases(int, (_Atomic(int)*)(p)), \
		_os_atomic_type_cases(unsigned int, (_Atomic(unsigned int)*)(p)), \
		_os_atomic_type_cases(long, (_Atomic(long)*)(p)), \
		_os_atomic_type_cases(unsigned long, (_Atomic(unsigned long)*)(p)), \
		_os_atomic_type_cases(long long, (_Atomic(long long)*)(p)), \
		_os_atomic_type_cases(unsigned long long, (_Atomic(unsigned long long)*)(p)), \
		_os_atomic_type_cases(void *, (_Atomic(void*)*)(p)), \
		_os_atomic_type_cases(const void *, (_Atomic(const void*)*)(p)), \
		default: (_Atomic(void*)*)(p))

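/*
 * _os_atomic_c11_atomic(p) above casts "p" to the matching _Atomic pointer
 * type so that it can be passed to the C11 atomic_*_explicit() generic
 * functions, whether the caller declared the location with os_atomic() or
 * as a plain integer or pointer.
 */
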
#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m)
// see comment in dispatch_once.c
#define os_atomic_maximally_synchronizing_barrier() \
		atomic_thread_fence(memory_order_seq_cst)

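/*
 * The generic "maximally synchronizing" barrier above is an ordinary
 * seq_cst fence; it is #undef'd and replaced with a cpuid-based barrier
 * for x86 at the bottom of this file.
 */
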
#define os_atomic_load(p, m) \
		({ _os_atomic_basetypeof(p) _r = \
		atomic_load_explicit(_os_atomic_c11_atomic(p), \
		memory_order_##m); (typeof(*(p)))_r; })
#define os_atomic_store(p, v, m) \
		({ _os_atomic_basetypeof(p) _v = (v); \
		atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \
		memory_order_##m); })
#define os_atomic_xchg(p, v, m) \
		({ _os_atomic_basetypeof(p) _v = (v), _r = \
		atomic_exchange_explicit(_os_atomic_c11_atomic(p), _v, \
		memory_order_##m); (typeof(*(p)))_r; })
#define os_atomic_cmpxchg(p, e, v, m) \
		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \
		atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
		&_r, _v, memory_order_##m, \
		memory_order_relaxed); })
#define os_atomic_cmpxchgv(p, e, v, g, m) \
		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
		atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
		&_r, _v, memory_order_##m, \
		memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; })
#define os_atomic_cmpxchgvw(p, e, v, g, m) \
		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
		atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \
		&_r, _v, memory_order_##m, \
		memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; })

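/*
 * For all of the macros above and below, "m" is a bare memory order suffix
 * (relaxed, acquire, release, acq_rel, seq_cst, or the ordered/dependency
 * aliases) that gets token-pasted onto memory_order_##m. The cmpxchg
 * variants use a relaxed ordering on failure; cmpxchgv and cmpxchgvw also
 * store the value actually observed through "g", and cmpxchgvw is the weak
 * form that may fail spuriously. A usage sketch with hypothetical names
 * (not part of libdispatch):
 *
 *	uint64_t old_state = os_atomic_load(&hyp_state, relaxed);
 *	if (os_atomic_cmpxchg(&hyp_state, old_state,
 *			old_state | HYP_LOCK_BIT, acquire)) {
 *		// the lock bit was set by this thread
 *	}
 */
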
#define _os_atomic_c11_op(p, v, m, o, op) \
		({ _os_atomic_basetypeof(p) _v = (v), _r = \
		atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
		memory_order_##m); (typeof(*(p)))(_r op _v); })
#define _os_atomic_c11_op_orig(p, v, m, o, op) \
		({ _os_atomic_basetypeof(p) _v = (v), _r = \
		atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
		memory_order_##m); (typeof(*(p)))_r; })
#define os_atomic_add(p, v, m) \
		_os_atomic_c11_op((p), (v), m, add, +)
#define os_atomic_add_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, add, +)
#define os_atomic_sub(p, v, m) \
		_os_atomic_c11_op((p), (v), m, sub, -)
#define os_atomic_sub_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, sub, -)
#define os_atomic_and(p, v, m) \
		_os_atomic_c11_op((p), (v), m, and, &)
#define os_atomic_and_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, and, &)
#define os_atomic_or(p, v, m) \
		_os_atomic_c11_op((p), (v), m, or, |)
#define os_atomic_or_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, or, |)
#define os_atomic_xor(p, v, m) \
		_os_atomic_c11_op((p), (v), m, xor, ^)
#define os_atomic_xor_orig(p, v, m) \
		_os_atomic_c11_op_orig((p), (v), m, xor, ^)

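/*
 * The arithmetic and bitwise macros above come in two flavors: the plain
 * form returns the value *after* the operation (the fetched value combined
 * with "v" via "op"), while the _orig form returns the value the location
 * held *before* the operation, i.e. the raw atomic_fetch_* result.
 */
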
#define os_atomic_force_dependency_on(p, e) (p)
#define os_atomic_load_with_dependency_on(p, e) \
		os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed)
#define os_atomic_load_with_dependency_on2o(p, f, e) \
		os_atomic_load_with_dependency_on(&(p)->f, e)

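/*
 * In this generic C11 implementation the dependency token "e" is ignored:
 * os_atomic_force_dependency_on() returns "p" unchanged and the load above
 * is relaxed. An architecture-specific implementation could fold "e" into
 * the address computation to create a real data dependency.
 */
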
#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \
		bool _result = false; \
		typeof(p) _p = (p); \
		ov = os_atomic_load(_p, relaxed); \
		do { \
			__VA_ARGS__; \
			_result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
		} while (os_unlikely(!_result)); \
		_result; \
	})
#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \
		os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__)
#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
		({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
#define os_atomic_rmw_loop_give_up(expr) \
		os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)

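/*
 * os_atomic_rmw_loop() above implements a read-modify-write loop: it loads
 * *p into "ov", lets the trailing statements compute "nv", and retries a
 * weak compare-exchange (which refreshes "ov" on failure) until it
 * succeeds. os_atomic_rmw_loop_give_up() leaves the loop by executing
 * "expr" (typically a return) after issuing the requested fence. A usage
 * sketch with hypothetical names (not part of libdispatch):
 *
 *	uint32_t old_cnt, new_cnt;
 *	os_atomic_rmw_loop(&hyp_refcnt, old_cnt, new_cnt, relaxed, {
 *		if (old_cnt == 0) {
 *			os_atomic_rmw_loop_give_up(return false);
 *		}
 *		new_cnt = old_cnt + 1;
 *	});
 */
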
#define os_atomic_load2o(p, f, m) \
		os_atomic_load(&(p)->f, m)
#define os_atomic_store2o(p, f, v, m) \
		os_atomic_store(&(p)->f, (v), m)
#define os_atomic_xchg2o(p, f, v, m) \
		os_atomic_xchg(&(p)->f, (v), m)
#define os_atomic_cmpxchg2o(p, f, e, v, m) \
		os_atomic_cmpxchg(&(p)->f, (e), (v), m)
#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \
		os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m)
#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \
		os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m)
#define os_atomic_add2o(p, f, v, m) \
		os_atomic_add(&(p)->f, (v), m)
#define os_atomic_add_orig2o(p, f, v, m) \
		os_atomic_add_orig(&(p)->f, (v), m)
#define os_atomic_sub2o(p, f, v, m) \
		os_atomic_sub(&(p)->f, (v), m)
#define os_atomic_sub_orig2o(p, f, v, m) \
		os_atomic_sub_orig(&(p)->f, (v), m)
#define os_atomic_and2o(p, f, v, m) \
		os_atomic_and(&(p)->f, (v), m)
#define os_atomic_and_orig2o(p, f, v, m) \
		os_atomic_and_orig(&(p)->f, (v), m)
#define os_atomic_or2o(p, f, v, m) \
		os_atomic_or(&(p)->f, (v), m)
#define os_atomic_or_orig2o(p, f, v, m) \
		os_atomic_or_orig(&(p)->f, (v), m)
#define os_atomic_xor2o(p, f, v, m) \
		os_atomic_xor(&(p)->f, (v), m)
#define os_atomic_xor_orig2o(p, f, v, m) \
		os_atomic_xor_orig(&(p)->f, (v), m)

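/*
 * The "2o" variants above operate on a struct member: they take an object
 * pointer "p" and a field name "f" and forward to the corresponding base
 * macro on &(p)->f, e.g. os_atomic_add2o(obj, hyp_counter, 1, relaxed)
 * (hypothetical names).
 */
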
#define os_atomic_inc(p, m) \
		os_atomic_add((p), 1, m)
#define os_atomic_inc_orig(p, m) \
		os_atomic_add_orig((p), 1, m)
#define os_atomic_inc2o(p, f, m) \
		os_atomic_add2o(p, f, 1, m)
#define os_atomic_inc_orig2o(p, f, m) \
		os_atomic_add_orig2o(p, f, 1, m)
#define os_atomic_dec(p, m) \
		os_atomic_sub((p), 1, m)
#define os_atomic_dec_orig(p, m) \
		os_atomic_sub_orig((p), 1, m)
#define os_atomic_dec2o(p, f, m) \
		os_atomic_sub2o(p, f, 1, m)
#define os_atomic_dec_orig2o(p, f, m) \
		os_atomic_sub_orig2o(p, f, 1, m)

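/*
 * On x86, the maximally synchronizing barrier below is implemented with
 * cpuid, a fully serializing instruction, which is a heavier barrier than
 * the generic seq_cst fence defined earlier. The i386/gcc variant saves
 * and restores %ebx by hand because gcc does not allow i386 inline asm to
 * clobber it (it may serve as the PIC base register).
 */
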
#if defined(__x86_64__) || defined(__i386__)
#undef os_atomic_maximally_synchronizing_barrier
#ifdef __LP64__
#define os_atomic_maximally_synchronizing_barrier() \
		({ unsigned long _clbr; __asm__ __volatile__( \
		"cpuid" \
		: "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); })
#else
#ifdef __llvm__
#define os_atomic_maximally_synchronizing_barrier() \
		({ unsigned long _clbr; __asm__ __volatile__( \
		"cpuid" \
		: "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); })
#else // gcc does not allow inline i386 asm to clobber ebx
#define os_atomic_maximally_synchronizing_barrier() \
		({ unsigned long _clbr; __asm__ __volatile__( \
		"pushl %%ebx\n\t" \
		"cpuid\n\t" \
		"popl %%ebx" \
		: "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); })
#endif
#endif
#endif // defined(__x86_64__) || defined(__i386__)

#endif // __DISPATCH_SHIMS_ATOMIC__