/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * This header provides some gory details to implement the <os/atomic_private.h>
 * interfaces. Nothing in this header should be called directly, and no promise
 * is made to keep this interface stable.
 *
 * Architecture overrides.
 */

#ifndef __OS_ATOMIC_PRIVATE_H__
#error "Do not include <os/atomic_private_arch.h> directly, use <os/atomic_private.h>"
#endif

#ifndef __OS_ATOMIC_PRIVATE_ARCH_H__
#define __OS_ATOMIC_PRIVATE_ARCH_H__

#pragma mark - arm v7

#if defined(__arm__)

#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY
/*
 * On armv7, we do provide fine-grained dependency injection, so
 * memory_order_dependency maps to relaxed as far as thread fences are
 * concerned.
 */
#undef _os_atomic_mo_dependency
#define _os_atomic_mo_dependency memory_order_relaxed

#undef os_atomic_make_dependency
#define os_atomic_make_dependency(v) ({ \
	os_atomic_dependency_t _dep; \
	__asm__ __volatile__("and %[_dep], %[_v], #0" \
	    : [_dep] "=r" (_dep.__opaque_zero) \
	    : [_v] "r" (v)); \
	os_compiler_barrier(acquire); \
	_dep; \
})
#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY
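
/*
 * Illustrative sketch (not compiled): how a caller might consume the
 * dependency machinery above, via the os_atomic_load(..., dependency)
 * and os_atomic_load_with_dependency_on() interfaces described in
 * <os/atomic_private.h>. The `queue_head`/`node` names are hypothetical.
 *
 *     struct node *head = os_atomic_load(&queue_head, dependency);
 *     // `head` now carries a register-level dependency, so this
 *     // dependent load is ordered after it without an acquire fence:
 *     uint64_t v = os_atomic_load_with_dependency_on(&head->value,
 *         (uintptr_t)head);
 */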
66 | ||
67 | #define os_atomic_clear_exclusive() __builtin_arm_clrex() | |
68 | ||
69 | #define os_atomic_load_exclusive(p, m) ({ \ | |
70 | __auto_type _r = __builtin_arm_ldrex(os_cast_to_nonatomic_pointer(p)); \ | |
71 | _os_memory_fence_after_atomic(m); \ | |
72 | _os_compiler_barrier_after_atomic(m); \ | |
73 | _r; \ | |
74 | }) | |
75 | ||
76 | #define os_atomic_store_exclusive(p, v, m) ({ \ | |
77 | _os_compiler_barrier_before_atomic(m); \ | |
78 | _os_memory_fence_before_atomic(m); \ | |
79 | !__builtin_arm_strex(v, os_cast_to_nonatomic_pointer(p)); \ | |
80 | }) | |
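
/*
 * Illustrative sketch (not compiled): a hand-rolled atomic increment on
 * top of the exclusive pair above. `counter` is a hypothetical uint32_t;
 * os_atomic_store_exclusive() evaluates to false whenever the exclusive
 * reservation was lost between the load and the store, so the loop
 * simply retries.
 *
 *     uint32_t ov;
 *     do {
 *         ov = os_atomic_load_exclusive(&counter, relaxed);
 *     } while (!os_atomic_store_exclusive(&counter, ov + 1, relaxed));
 */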
81 | ||
82 | #if !OS_ATOMIC_HAS_STARVATION_FREE_RMW && !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY | |
83 | ||
84 | /* | |
85 | * armv7 override of os_atomic_rmw_loop | |
86 | * documentation for os_atomic_rmw_loop is in <os/atomic_private.h> | |
87 | */ | |
88 | #undef os_atomic_rmw_loop | |
89 | #define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ | |
90 | int _result = 0; uint32_t _err = 0; \ | |
91 | __auto_type *_p = os_cast_to_nonatomic_pointer(p); \ | |
92 | for (;;) { \ | |
93 | ov = __builtin_arm_ldrex(_p); \ | |
94 | __VA_ARGS__; \ | |
95 | if (!_err) { \ | |
96 | /* release barrier only done for the first loop iteration */ \ | |
97 | _os_memory_fence_before_atomic(m); \ | |
98 | } \ | |
99 | _err = __builtin_arm_strex(nv, _p); \ | |
100 | if (__builtin_expect(!_err, 1)) { \ | |
101 | _os_memory_fence_after_atomic(m); \ | |
102 | _result = 1; \ | |
103 | break; \ | |
104 | } \ | |
105 | } \ | |
106 | _os_compiler_barrier_after_atomic(m); \ | |
107 | _result; \ | |
108 | }) | |
109 | ||
110 | /* | |
111 | * armv7 override of os_atomic_rmw_loop_give_up | |
112 | * documentation for os_atomic_rmw_loop_give_up is in <os/atomic_private.h> | |
113 | */ | |
114 | #undef os_atomic_rmw_loop_give_up | |
115 | #define os_atomic_rmw_loop_give_up(...) \ | |
116 | ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; }) | |
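
/*
 * Illustrative sketch (not compiled): canonical use of the loop above, a
 * hypothetical try-retain that refuses to resurrect a zero refcount.
 * `ov`/`nv` are bound by the macro; give_up clears the exclusive
 * reservation before leaving the loop.
 *
 *     static bool
 *     refcnt_try_retain(uint32_t *rc)
 *     {
 *         uint32_t ov, nv;
 *         return os_atomic_rmw_loop(rc, ov, nv, acquire, {
 *             if (ov == 0) {
 *                 os_atomic_rmw_loop_give_up(return false);
 *             }
 *             nv = ov + 1;
 *         });
 *     }
 */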
117 | ||
118 | #endif // !OS_ATOMIC_HAS_STARVATION_FREE_RMW && !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY | |
119 | ||
120 | #endif // __arm__ | |
121 | ||
122 | #pragma mark - arm64 | |
123 | ||
124 | #if defined(__arm64__) | |
125 | ||
#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY
/*
 * On arm64, we do provide fine-grained dependency injection, so
 * memory_order_dependency maps to relaxed as far as thread fences are
 * concerned.
 */
#undef _os_atomic_mo_dependency
#define _os_atomic_mo_dependency memory_order_relaxed

#undef os_atomic_make_dependency
#if __ARM64_ARCH_8_32__
#define os_atomic_make_dependency(v) ({ \
	os_atomic_dependency_t _dep; \
	__asm__ __volatile__("and %w[_dep], %w[_v], wzr" \
	    : [_dep] "=r" (_dep.__opaque_zero) \
	    : [_v] "r" (v)); \
	os_compiler_barrier(acquire); \
	_dep; \
})
#else
#define os_atomic_make_dependency(v) ({ \
	os_atomic_dependency_t _dep; \
	__asm__ __volatile__("and %[_dep], %[_v], xzr" \
	    : [_dep] "=r" (_dep.__opaque_zero) \
	    : [_v] "r" (v)); \
	os_compiler_barrier(acquire); \
	_dep; \
})
#endif
#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY
155 | ||
156 | #if defined(__ARM_ARCH_8_4__) | |
157 | /* on armv8.4 16-byte aligned load/store pair is atomic */ | |
158 | #undef os_atomic_load_is_plain | |
159 | #define os_atomic_load_is_plain(p) (sizeof(*(p)) <= 16) | |
160 | #endif | |
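
/*
 * Illustrative consequence (hypothetical type and field names): because
 * loads of up to 16 bytes are "plain" here, a two-word quantity such as
 *
 *     typedef struct { uint64_t ptr; uint64_t gen; } tagged_ptr_t;
 *
 * can be read with a single aligned load-pair instruction, where other
 * arm64 variants would have to fall back to a wider primitive.
 */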
161 | ||
162 | #define os_atomic_clear_exclusive() __builtin_arm_clrex() | |
163 | ||
164 | #define os_atomic_load_exclusive(p, m) ({ \ | |
165 | __auto_type _r = _os_atomic_mo_has_acquire(_os_atomic_mo_##m##_smp) \ | |
166 | ? __builtin_arm_ldaex(os_cast_to_nonatomic_pointer(p)) \ | |
167 | : __builtin_arm_ldrex(os_cast_to_nonatomic_pointer(p)); \ | |
168 | _os_compiler_barrier_after_atomic(m); \ | |
169 | _r; \ | |
170 | }) | |
171 | ||
172 | #define os_atomic_store_exclusive(p, v, m) ({ \ | |
173 | _os_compiler_barrier_before_atomic(m); \ | |
174 | (_os_atomic_mo_has_release(_os_atomic_mo_##m##_smp) \ | |
175 | ? !__builtin_arm_stlex(v, os_cast_to_nonatomic_pointer(p)) \ | |
176 | : !__builtin_arm_strex(v, os_cast_to_nonatomic_pointer(p))); \ | |
177 | }) | |
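
/*
 * Illustrative sketch (not compiled): exclusive loads are what make
 * low-power wait loops possible. A hypothetical wait-for-nonzero flag,
 * assuming the __builtin_arm_wfe() clang builtin:
 *
 *     uint32_t v;
 *     while ((v = os_atomic_load_exclusive(&flag, acquire)) == 0) {
 *         __builtin_arm_wfe();  // wakes when the reservation is cleared
 *     }
 *     os_atomic_clear_exclusive();
 */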
178 | ||
179 | #if !OS_ATOMIC_HAS_STARVATION_FREE_RMW && !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY | |
180 | ||
181 | /* | |
182 | * arm64 (without armv81 atomics) override of os_atomic_rmw_loop | |
183 | * documentation for os_atomic_rmw_loop is in <os/atomic_private.h> | |
184 | */ | |
185 | #undef os_atomic_rmw_loop | |
186 | #define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ | |
187 | int _result = 0; \ | |
188 | __auto_type *_p = os_cast_to_nonatomic_pointer(p); \ | |
189 | _os_compiler_barrier_before_atomic(m); \ | |
190 | do { \ | |
191 | if (_os_atomic_mo_has_acquire(_os_atomic_mo_##m##_smp)) { \ | |
192 | ov = __builtin_arm_ldaex(_p); \ | |
193 | } else { \ | |
194 | ov = __builtin_arm_ldrex(_p); \ | |
195 | } \ | |
196 | __VA_ARGS__; \ | |
197 | if (_os_atomic_mo_has_release(_os_atomic_mo_##m##_smp)) { \ | |
198 | _result = !__builtin_arm_stlex(nv, _p); \ | |
199 | } else { \ | |
200 | _result = !__builtin_arm_strex(nv, _p); \ | |
201 | } \ | |
202 | } while (__builtin_expect(!_result, 0)); \ | |
203 | _os_compiler_barrier_after_atomic(m); \ | |
204 | _result; \ | |
205 | }) | |
206 | ||
207 | /* | |
208 | * arm64 override of os_atomic_rmw_loop_give_up | |
209 | * documentation for os_atomic_rmw_loop_give_up is in <os/atomic_private.h> | |
210 | */ | |
211 | #undef os_atomic_rmw_loop_give_up | |
212 | #define os_atomic_rmw_loop_give_up(...) \ | |
213 | ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; }) | |
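
/*
 * Illustrative sketch (not compiled): the `m` argument selects the
 * instruction flavor above (acquire orderings use ldaxr, release
 * orderings use stlxr). A hypothetical flag-set with acq_rel ordering,
 * where `word` and `FLAG_BIT` are made-up names:
 *
 *     uint32_t ov, nv;
 *     os_atomic_rmw_loop(&word, ov, nv, acq_rel, {
 *         nv = ov | FLAG_BIT;
 *     });
 */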
214 | ||
215 | #endif // !OS_ATOMIC_HAS_STARVATION_FREE_RMW && !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY | |
216 | ||
217 | #endif // __arm64__ | |
218 | ||
219 | #endif /* __OS_ATOMIC_PRIVATE_ARCH_H__ */ |