/*
 * Copyright (c) 2015-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _MACHINE_ATOMIC_H
#error "Do not include <arm/atomic.h> directly, use <machine/atomic.h>"
#endif

#ifndef _ARM_ATOMIC_H_
#define _ARM_ATOMIC_H_

#include <mach/boolean.h>

// Parameter for __builtin_arm_dmb
#define DMB_OSHLD 0x1
#define DMB_OSHST 0x2
#define DMB_OSH   0x3
#define DMB_NSHLD 0x5
#define DMB_NSHST 0x6
#define DMB_NSH   0x7
#define DMB_ISHLD 0x9
#define DMB_ISHST 0xa
#define DMB_ISH   0xb
#define DMB_LD    0xd
#define DMB_ST    0xe
#define DMB_SY    0xf

// Parameter for __builtin_arm_dsb
#define DSB_OSHLD 0x1
#define DSB_OSHST 0x2
#define DSB_OSH   0x3
#define DSB_NSHLD 0x5
#define DSB_NSHST 0x6
#define DSB_NSH   0x7
#define DSB_ISHLD 0x9
#define DSB_ISHST 0xa
#define DSB_ISH   0xb
#define DSB_LD    0xd
#define DSB_ST    0xe
#define DSB_SY    0xf

// Parameter for __builtin_arm_isb
#define ISB_SY    0xf
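/*
 * Illustrative usage (not part of the original header): these encodings are
 * passed as the immediate argument to the corresponding compiler builtins,
 * matching the architectural barrier option names:
 *
 *     __builtin_arm_dmb(DMB_ISH);  // data memory barrier, inner shareable
 *     __builtin_arm_dsb(DSB_SY);   // data synchronization barrier, full system
 *     __builtin_arm_isb(ISB_SY);   // instruction synchronization barrier
 */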

#undef OS_ATOMIC_HAS_LLSC
#define OS_ATOMIC_HAS_LLSC 1

#if defined(__ARM_ARCH_8_2__) && defined(__arm64__)
#undef OS_ATOMIC_USE_LLSC
#define OS_ATOMIC_USE_LLSC 0
#endif
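
/*
 * Note: on ARMv8.2 targets (which imply the ARMv8.1 LSE atomics), the
 * os_atomic_rmw_loop override below is compiled out, so the generic
 * definition in <machine/atomic.h> applies and the compiler is free to
 * emit the dedicated atomic instructions instead of an LL/SC loop.
 */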

/*
 * On armv7 & arm64, we provide fine-grained dependency injection, so
 * memory_order_dependency maps to relaxed as far as thread fences are
 * concerned.
 */
#undef memory_order_dependency_smp
#define memory_order_dependency_smp memory_order_relaxed

#define os_atomic_clear_exclusive() __builtin_arm_clrex()

#if __arm__

#define os_atomic_load_exclusive(p, m) ({ \
    _os_atomic_basetypeof(p) _r; \
    _r = __builtin_arm_ldrex(p); \
    _os_memory_fence_after_atomic(m); \
    _os_compiler_barrier_after_atomic(m); \
    _r; \
})

#define os_atomic_store_exclusive(p, v, m) ({ \
    _os_compiler_barrier_before_atomic(m); \
    _os_memory_fence_before_atomic(m); \
    !__builtin_arm_strex(p, v); \
})
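
/*
 * Illustrative sketch (not from the original file; counter_p is a
 * hypothetical uint32_t *): a minimal LL/SC increment built from the
 * exclusive pair above. The store fails and the loop retries if the
 * reservation was lost between the two operations.
 *
 *     uint32_t _ov;
 *     do {
 *         _ov = os_atomic_load_exclusive(counter_p, relaxed);
 *     } while (!os_atomic_store_exclusive(counter_p, _ov + 1, relaxed));
 */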

/*
 * armv7 override of os_atomic_make_dependency
 * documentation for os_atomic_make_dependency is in <machine/atomic.h>
 */
#undef os_atomic_make_dependency
#define os_atomic_make_dependency(v) ({ \
    os_atomic_dependency_t _dep; \
    __asm__ __volatile__("and %[_dep], %[_v], #0" \
        : [_dep] "=r" (_dep.__opaque_zero) : [_v] "r" (v)); \
    os_compiler_barrier(acquire); \
    _dep; \
})
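
/*
 * How the override above works: the AND with zero yields a zero that the
 * compiler cannot constant-fold away, but which the CPU still treats as
 * data-dependent on `v`. Injecting that zero into a later pointer keeps a
 * dependent load ordered without a fence. A hedged sketch, assuming the
 * companion os_atomic_inject_dependency macro documented in
 * <machine/atomic.h> and hypothetical variable names:
 *
 *     os_atomic_dependency_t _dep = os_atomic_make_dependency(head);
 *     node = os_atomic_inject_dependency(node, _dep);
 *     value = node->value;  // ordered after the load that produced `head`
 */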

/*
 * armv7 override of os_atomic_rmw_loop
 * documentation for os_atomic_rmw_loop is in <machine/atomic.h>
 */
#undef os_atomic_rmw_loop
#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
    int _result = 0; uint32_t _err = 0; \
    _os_atomic_basetypeof(p) *_p; \
    _p = (_os_atomic_basetypeof(p) *)(p); \
    _os_compiler_barrier_before_atomic(m); \
    for (;;) { \
        ov = __builtin_arm_ldrex(_p); \
        __VA_ARGS__; \
        if (!_err) { \
            /* release barrier only done for the first loop iteration */ \
            _os_memory_fence_before_atomic(m); \
        } \
        _err = __builtin_arm_strex(nv, _p); \
        if (__builtin_expect(!_err, 1)) { \
            _os_memory_fence_after_atomic(m); \
            _result = 1; \
            break; \
        } \
    } \
    _os_compiler_barrier_after_atomic(m); \
    _result; \
})

/*
 * armv7 override of os_atomic_rmw_loop_give_up
 * documentation for os_atomic_rmw_loop_give_up is in <machine/atomic.h>
 */
#undef os_atomic_rmw_loop_give_up
#define os_atomic_rmw_loop_give_up(...) \
    ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; })
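
/*
 * Illustrative use of the two overrides above (a sketch with hypothetical
 * names, not from the original file): take a bit lock, bailing out if it
 * is already held. The body runs between the exclusive load and store;
 * give_up clears the reservation before leaving the loop.
 *
 *     uint32_t ov, nv;
 *     os_atomic_rmw_loop(&lock->state, ov, nv, acquire, {
 *         if (ov & LOCK_BIT) {
 *             os_atomic_rmw_loop_give_up(return false);
 *         }
 *         nv = ov | LOCK_BIT;
 *     });
 *     return true;
 */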

#else // __arm64__

#define os_atomic_load_exclusive(p, m) ({ \
    _os_atomic_basetypeof(p) _r; \
    if (memory_order_has_acquire(memory_order_##m##_smp)) { \
        _r = __builtin_arm_ldaex(p); \
    } else { \
        _r = __builtin_arm_ldrex(p); \
    } \
    _os_compiler_barrier_after_atomic(m); \
    _r; \
})

#define os_atomic_store_exclusive(p, v, m) ({ \
    _os_compiler_barrier_before_atomic(m); \
    (memory_order_has_release(memory_order_##m##_smp) ? \
        !__builtin_arm_stlex(p, v) : !__builtin_arm_strex(p, v)); \
})

/*
 * arm64 override of os_atomic_make_dependency
 * documentation for os_atomic_make_dependency is in <machine/atomic.h>
 */
#undef os_atomic_make_dependency
#define os_atomic_make_dependency(v) ({ \
    os_atomic_dependency_t _dep; \
    __asm__ __volatile__("and %[_dep], %[_v], xzr" \
        : [_dep] "=r" (_dep.__opaque_zero) : [_v] "r" (v)); \
    os_compiler_barrier(acquire); \
    _dep; \
})

#if OS_ATOMIC_USE_LLSC

/*
 * arm64 (without ARMv8.1 atomics) override of os_atomic_rmw_loop
 * documentation for os_atomic_rmw_loop is in <machine/atomic.h>
 */
#undef os_atomic_rmw_loop
#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
    int _result = 0; \
    _os_atomic_basetypeof(p) *_p; \
    _p = (_os_atomic_basetypeof(p) *)(p); \
    _os_compiler_barrier_before_atomic(m); \
    do { \
        if (memory_order_has_acquire(memory_order_##m##_smp)) { \
            ov = __builtin_arm_ldaex(_p); \
        } else { \
            ov = __builtin_arm_ldrex(_p); \
        } \
        __VA_ARGS__; \
        if (memory_order_has_release(memory_order_##m##_smp)) { \
            _result = !__builtin_arm_stlex(nv, _p); \
        } else { \
            _result = !__builtin_arm_strex(nv, _p); \
        } \
    } while (__builtin_expect(!_result, 0)); \
    _os_compiler_barrier_after_atomic(m); \
    _result; \
})
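
/*
 * Unlike the armv7 loop, which brackets plain ldrex/strex with explicit
 * memory fences, this arm64 version folds the required ordering into the
 * exclusive accesses themselves (ldaex/stlex), so no separate barrier
 * instructions are emitted.
 */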

/*
 * arm64 override of os_atomic_rmw_loop_give_up
 * documentation for os_atomic_rmw_loop_give_up is in <machine/atomic.h>
 */
#undef os_atomic_rmw_loop_give_up
#define os_atomic_rmw_loop_give_up(...) \
    ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; })

#endif // OS_ATOMIC_USE_LLSC

#endif // __arm64__

#endif // _ARM_ATOMIC_H_