/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * This header provides some gory details to implement the <machine/atomic.h>
 * interfaces. Nothing in this header should be called directly; no promise is
 * made to keep this interface stable.
 */

#ifndef _MACHINE_ATOMIC_H
#error "Do not include <machine/atomic_impl.h> directly, use <machine/atomic.h>"
#endif

#ifndef _MACHINE_ATOMIC_IMPL_H
#define _MACHINE_ATOMIC_IMPL_H

#include <stdatomic.h>
#include <machine/smp.h>

/*
 * Predicates that tell whether a given memory order implies acquire
 * (respectively release) semantics.
 */
static inline int
memory_order_has_acquire(enum memory_order ord)
{
	switch (ord) {
	case memory_order_consume:
	case memory_order_acquire:
	case memory_order_acq_rel:
	case memory_order_seq_cst:
		return 1;
	default:
		return 0;
	}
}

static inline int
memory_order_has_release(enum memory_order ord)
{
	switch (ord) {
	case memory_order_release:
	case memory_order_acq_rel:
	case memory_order_seq_cst:
		return 1;
	default:
		return 0;
	}
}

#if __SMP__

#define memory_order_relaxed_smp            memory_order_relaxed
#define memory_order_compiler_acquire_smp   memory_order_relaxed
#define memory_order_compiler_release_smp   memory_order_relaxed
#define memory_order_compiler_acq_rel_smp   memory_order_relaxed
#define memory_order_consume_smp            memory_order_consume
#define memory_order_dependency_smp         memory_order_acquire
#define memory_order_acquire_smp            memory_order_acquire
#define memory_order_release_smp            memory_order_release
#define memory_order_acq_rel_smp            memory_order_acq_rel
#define memory_order_seq_cst_smp            memory_order_seq_cst

#else

#define memory_order_relaxed_smp            memory_order_relaxed
#define memory_order_compiler_acquire_smp   memory_order_relaxed
#define memory_order_compiler_release_smp   memory_order_relaxed
#define memory_order_compiler_acq_rel_smp   memory_order_relaxed
#define memory_order_consume_smp            memory_order_relaxed
#define memory_order_dependency_smp         memory_order_relaxed
#define memory_order_acquire_smp            memory_order_relaxed
#define memory_order_release_smp            memory_order_relaxed
#define memory_order_acq_rel_smp            memory_order_relaxed
#define memory_order_seq_cst_smp            memory_order_relaxed

#endif

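/*
 * Illustrative sketch, not part of the original header: with the mappings
 * above, an ordering spelled with the `_smp` suffix costs a real hardware
 * ordering only on SMP builds and degrades to relaxed on UP builds. The
 * hypothetical helper below performs a release store on SMP kernels and a
 * plain relaxed store otherwise.
 */
static inline void
_os_atomic_example_store_release(unsigned int volatile _Atomic *p, unsigned int v)
{
	atomic_store_explicit(p, v, memory_order_release_smp);
}
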
/*
 * Hack needed for os_compiler_barrier() to work (including with empty argument)
 */
#define _os_compiler_barrier_relaxed        memory_order_relaxed
#define _os_compiler_barrier_acquire        memory_order_acquire
#define _os_compiler_barrier_release        memory_order_release
#define _os_compiler_barrier_acq_rel        memory_order_acq_rel
#define _os_compiler_barrier_               memory_order_acq_rel

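/*
 * Illustrative sketch, assuming os_compiler_barrier() in <machine/atomic.h>
 * token-pastes its (possibly empty) argument onto `_os_compiler_barrier_`:
 * the empty-suffix define above is what lets a bare os_compiler_barrier()
 * resolve to a full acq_rel compiler barrier. The hypothetical helper below
 * spells out the resulting signal fences directly.
 */
static inline void
_os_compiler_barrier_example(void)
{
	atomic_signal_fence(_os_compiler_barrier_);        /* what os_compiler_barrier() would emit        */
	atomic_signal_fence(_os_compiler_barrier_release); /* what os_compiler_barrier(release) would emit */
}
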
/*
 * Mapping between compiler barrier/memory orders and:
 * - compiler barriers before atomics ("rel_barrier")
 * - compiler barriers after atomics ("acq_barrier")
 */
#define _os_rel_barrier_relaxed             memory_order_relaxed
#define _os_rel_barrier_compiler_acquire    memory_order_relaxed
#define _os_rel_barrier_compiler_release    memory_order_release
#define _os_rel_barrier_compiler_acq_rel    memory_order_release
#define _os_rel_barrier_consume             memory_order_relaxed
#define _os_rel_barrier_dependency          memory_order_relaxed
#define _os_rel_barrier_acquire             memory_order_relaxed
#define _os_rel_barrier_release             memory_order_release
#define _os_rel_barrier_acq_rel             memory_order_release
#define _os_rel_barrier_seq_cst             memory_order_release

#define _os_acq_barrier_relaxed             memory_order_relaxed
#define _os_acq_barrier_compiler_acquire    memory_order_acquire
#define _os_acq_barrier_compiler_release    memory_order_relaxed
#define _os_acq_barrier_compiler_acq_rel    memory_order_acquire
#define _os_acq_barrier_consume             memory_order_acquire
#define _os_acq_barrier_dependency          memory_order_acquire
#define _os_acq_barrier_acquire             memory_order_acquire
#define _os_acq_barrier_release             memory_order_relaxed
#define _os_acq_barrier_acq_rel             memory_order_acquire
#define _os_acq_barrier_seq_cst             memory_order_acquire

#define _os_compiler_barrier_before_atomic(m) \
        atomic_signal_fence(_os_rel_barrier_##m)
#define _os_compiler_barrier_after_atomic(m) \
        atomic_signal_fence(_os_acq_barrier_##m)

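/*
 * Illustrative sketch, not part of the original header: bracketing a C11
 * operation with the two macros above pins compiler reordering around it
 * without emitting hardware fences. For a `release` ordering only the
 * "before" barrier is non-trivial, as the hypothetical helper below shows.
 */
static inline unsigned int
_os_atomic_example_inc_orig_release(unsigned int volatile _Atomic *p)
{
	unsigned int prev;
	_os_compiler_barrier_before_atomic(release);  /* atomic_signal_fence(memory_order_release) */
	prev = atomic_fetch_add_explicit(p, 1, memory_order_release_smp);
	_os_compiler_barrier_after_atomic(release);   /* atomic_signal_fence(memory_order_relaxed) */
	return prev;
}
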
/*
 * Mapping between compiler barrier/memory orders and:
 * - memory fences before atomics ("rel_fence")
 * - memory fences after atomics ("acq_fence")
 */
#define _os_rel_fence_relaxed               memory_order_relaxed
#define _os_rel_fence_compiler_acquire      memory_order_relaxed
#define _os_rel_fence_compiler_release      memory_order_release
#define _os_rel_fence_compiler_acq_rel      memory_order_release
#define _os_rel_fence_consume               memory_order_relaxed_smp
#define _os_rel_fence_dependency            memory_order_relaxed_smp
#define _os_rel_fence_acquire               memory_order_relaxed_smp
#define _os_rel_fence_release               memory_order_release_smp
#define _os_rel_fence_acq_rel               memory_order_release_smp
#define _os_rel_fence_seq_cst               memory_order_release_smp

#define _os_acq_fence_relaxed               memory_order_relaxed
#define _os_acq_fence_compiler_acquire      memory_order_relaxed
#define _os_acq_fence_compiler_release      memory_order_relaxed
#define _os_acq_fence_compiler_acq_rel      memory_order_relaxed
#define _os_acq_fence_consume               memory_order_acquire_smp
#define _os_acq_fence_dependency            memory_order_dependency_smp
#define _os_acq_fence_acquire               memory_order_acquire_smp
#define _os_acq_fence_release               memory_order_relaxed_smp
#define _os_acq_fence_acq_rel               memory_order_acquire_smp
#define _os_acq_fence_seq_cst               memory_order_acquire_smp

#define _os_memory_fence_before_atomic(m) \
        atomic_thread_fence(_os_rel_fence_##m)
#define _os_memory_fence_after_atomic(m) \
        atomic_thread_fence(_os_acq_fence_##m)

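/*
 * Illustrative sketch, not part of the original header: on ports that prefer
 * to implement an acquire load as a relaxed load followed by a fence, the
 * mapping above emits a real fence only on SMP builds (via
 * memory_order_acquire_smp) and nothing for compiler-only orderings. A
 * hypothetical helper:
 */
static inline unsigned int
_os_atomic_example_load_acquire(unsigned int volatile _Atomic *p)
{
	unsigned int value;
	value = atomic_load_explicit(p, memory_order_relaxed);
	_os_memory_fence_after_atomic(acquire); /* atomic_thread_fence(memory_order_acquire_smp) */
	return value;
}
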
/*
 * Misc. helpers
 */

/*
 * For this implementation, we make sure the compiler cannot coalesce any of the
 * os_atomic calls by casting all atomic variables to `volatile _Atomic`.
 *
 * At the time this decision was made, clang had been treating all `_Atomic`
 * accesses as if qualified `volatile _Atomic`, so the cast below freezes that
 * aspect of the codegen in time.
 *
 * When/if clang starts coalescing non-volatile _Atomics, we may decide to add
 * coalescing orderings, e.g. {relaxed,acquire,release,acq_rel,seq_cst}_nv.
 */
#define _os_atomic_c11_atomic(p) \
        ((typeof(*(p)) volatile _Atomic *)(p))

#define _os_atomic_basetypeof(p) \
        typeof(atomic_load(_os_atomic_c11_atomic(p)))

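/*
 * Illustrative sketch, not part of the original header: the cast lets callers
 * declare their variables without _Atomic yet still go through C11 atomics,
 * and _os_atomic_basetypeof() recovers the plain value type. The hypothetical
 * helper below is such a caller.
 */
static inline unsigned long
_os_atomic_example_plain_load(unsigned long *counter)
{
	_os_atomic_basetypeof(counter) value;
	value = atomic_load_explicit(_os_atomic_c11_atomic(counter),
	    memory_order_relaxed);
	return value;
}
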
#define _os_atomic_op_orig(p, v, m, o)  ({ \
        _os_atomic_basetypeof(p) _r; \
        _os_compiler_barrier_before_atomic(m); \
        _r = o(_os_atomic_c11_atomic(p), v, memory_order_##m##_smp); \
        _os_compiler_barrier_after_atomic(m); \
        _r; \
})

#define _os_atomic_c11_op_orig(p, v, m, o) \
        _os_atomic_op_orig(p, v, m, atomic_##o##_explicit)

#define _os_atomic_c11_op(p, v, m, o, op) \
        ({ typeof(v) _v = (v); _os_atomic_c11_op_orig(p, _v, m, o) op _v; })

#define _os_atomic_clang_op_orig(p, v, m, o) \
        _os_atomic_op_orig(p, v, m, __atomic_##o)

#define _os_atomic_clang_op(p, v, m, o, op) \
        ({ typeof(v) _v = (v); _os_atomic_basetypeof(p) _r = \
        _os_atomic_clang_op_orig(p, _v, m, o); op(_r, _v); })

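/*
 * Illustrative sketch, assuming the public os_atomic_* wrappers in
 * <machine/atomic.h> are built from these building blocks: a hypothetical
 * "add and return the new value" operation composes the fetch-style
 * primitive with the arithmetic operator, as below.
 */
static inline unsigned int
_os_atomic_example_add(unsigned int *p, unsigned int delta)
{
	/* atomic_fetch_add_explicit returns the old value; `+ _v` yields the new one */
	return _os_atomic_c11_op(p, delta, relaxed, fetch_add, +);
}
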
/*
 * _os_atomic_auto_dependency() accepts either an os_atomic_dependency_t,
 * which is passed through unchanged, or a plain value, which is wrapped
 * with os_atomic_make_dependency().
 */
#define _os_atomic_auto_dependency(e) \
        _Generic(e, \
            os_atomic_dependency_t: (e), \
            default: os_atomic_make_dependency(e))

#endif /* _MACHINE_ATOMIC_IMPL_H */