/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _ARM_ATOMIC_H_
#define _ARM_ATOMIC_H_

#include <arm/smp.h>

// Parameter for __builtin_arm_dmb
#define DMB_NSH		0x7
#define DMB_ISHLD	0x9
#define DMB_ISHST	0xa
#define DMB_ISH		0xb
#define DMB_SY		0xf

// Parameter for __builtin_arm_dsb
#define DSB_NSH		0x7
#define DSB_ISHLD	0x9
#define DSB_ISHST	0xa
#define DSB_ISH		0xb
#define DSB_SY		0xf

// Parameter for __builtin_arm_isb
#define ISB_SY		0xf

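/*
 * Illustrative sketch (not part of this interface): the clang barrier
 * builtins take the encodings above as their immediate argument, e.g.
 *
 *	__builtin_arm_dmb(DMB_ISH);	// data memory barrier, inner shareable
 *	__builtin_arm_dsb(DSB_SY);	// data synchronization barrier, full system
 *	__builtin_arm_isb(ISB_SY);	// instruction synchronization barrier
 */
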
#if __SMP__

#define memory_order_consume_smp memory_order_consume
#define memory_order_acquire_smp memory_order_acquire
#define memory_order_release_smp memory_order_release
#define memory_order_acq_rel_smp memory_order_acq_rel
#define memory_order_seq_cst_smp memory_order_seq_cst

#else

#define memory_order_consume_smp memory_order_relaxed
#define memory_order_acquire_smp memory_order_relaxed
#define memory_order_release_smp memory_order_relaxed
#define memory_order_acq_rel_smp memory_order_relaxed
#define memory_order_seq_cst_smp memory_order_relaxed

#endif

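/*
 * Illustrative sketch (assumes <stdatomic.h> and a hypothetical
 * "_Atomic uint32_t flag"): the *_smp orderings provide real
 * acquire/release semantics on SMP configurations and degrade to relaxed
 * on uniprocessor builds, where the barriers are unnecessary.
 *
 *	atomic_store_explicit(&flag, 1, memory_order_release_smp);
 *	uint32_t v = atomic_load_explicit(&flag, memory_order_acquire_smp);
 */
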
/*
 * Atomic operation helpers
 *
 * These static functions are designed to be inlined. The memory_order
 * arguments are expected to be known at compile time, which lets the
 * compiler collapse each helper into a single atomic operation.
 */

static inline boolean_t
memory_order_has_acquire(enum memory_order ord)
{
	switch (ord) {
	case memory_order_consume:
	case memory_order_acquire:
	case memory_order_acq_rel:
	case memory_order_seq_cst:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline boolean_t
memory_order_has_release(enum memory_order ord)
{
	switch (ord) {
	case memory_order_release:
	case memory_order_acq_rel:
	case memory_order_seq_cst:
		return TRUE;
	default:
		return FALSE;
	}
}

#ifdef ATOMIC_PRIVATE

#define clear_exclusive()	__builtin_arm_clrex()

__unused static uint32_t
load_exclusive32(uint32_t *target, enum memory_order ord)
{
	uint32_t value;

#if __arm__
	if (memory_order_has_release(ord)) {
		// Pre-load release barrier
		atomic_thread_fence(memory_order_release);
	}
	value = __builtin_arm_ldrex(target);
#else
	if (memory_order_has_acquire(ord))
		value = __builtin_arm_ldaex(target);	// ldaxr
	else
		value = __builtin_arm_ldrex(target);	// ldxr
#endif // __arm__
	return value;
}

__unused static boolean_t
store_exclusive32(uint32_t *target, uint32_t value, enum memory_order ord)
{
	boolean_t err;

#if __arm__
	err = __builtin_arm_strex(value, target);
	if (memory_order_has_acquire(ord)) {
		// Post-store acquire barrier
		atomic_thread_fence(memory_order_acquire);
	}
#else
	if (memory_order_has_release(ord))
		err = __builtin_arm_stlex(value, target);	// stlxr
	else
		err = __builtin_arm_strex(value, target);	// stxr
#endif // __arm__
	return !err;
}

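/*
 * Illustrative sketch (assumes ATOMIC_PRIVATE; "bits" and "mask" are
 * hypothetical uint32_t values): a load/store-exclusive retry loop built
 * from the helpers above, here performing an atomic fetch-or.
 * store_exclusive32() returns TRUE only when the exclusive store succeeded.
 *
 *	uint32_t ov;
 *	do {
 *		ov = load_exclusive32(&bits, memory_order_relaxed);
 *	} while (!store_exclusive32(&bits, ov | mask, memory_order_relaxed));
 */
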
__unused static uintptr_t
load_exclusive(uintptr_t *target, enum memory_order ord)
{
#if !__LP64__
	return load_exclusive32((uint32_t *)target, ord);
#else
	uintptr_t value;

	if (memory_order_has_acquire(ord))
		value = __builtin_arm_ldaex(target);	// ldaxr
	else
		value = __builtin_arm_ldrex(target);	// ldxr
	return value;
#endif // !__LP64__
}

__unused static boolean_t
store_exclusive(uintptr_t *target, uintptr_t value, enum memory_order ord)
{
#if !__LP64__
	return store_exclusive32((uint32_t *)target, value, ord);
#else
	boolean_t err;

	if (memory_order_has_release(ord))
		err = __builtin_arm_stlex(value, target);	// stlxr
	else
		err = __builtin_arm_strex(value, target);	// stxr
	return !err;
#endif // !__LP64__
}

__unused static boolean_t
atomic_compare_exchange(uintptr_t *target, uintptr_t oldval, uintptr_t newval,
	enum memory_order orig_ord, boolean_t wait)
{
	enum memory_order ord = orig_ord;
	uintptr_t value;

#if __arm__
	ord = memory_order_relaxed;
	if (memory_order_has_release(orig_ord)) {
		atomic_thread_fence(memory_order_release);
	}
#endif
	do {
		value = load_exclusive(target, ord);
		if (value != oldval) {
			if (wait)
				wait_for_event();	// Wait with monitor held
			else
				clear_exclusive();	// Clear exclusive monitor
			return FALSE;
		}
	} while (!store_exclusive(target, newval, ord));
#if __arm__
	if (memory_order_has_acquire(orig_ord)) {
		atomic_thread_fence(memory_order_acquire);
	}
#endif
	return TRUE;
}

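/*
 * Illustrative sketch (assumes ATOMIC_PRIVATE; "lock" is a hypothetical
 * uintptr_t lock word): taking the lock with acquire semantics. When the
 * word is already owned and wait is TRUE, the routine issues
 * wait_for_event() while the exclusive monitor is still armed, so the
 * caller simply retries after being woken.
 *
 *	while (!atomic_compare_exchange(&lock, 0, 1,
 *	    memory_order_acquire_smp, TRUE)) {
 *		// retry after WFE
 *	}
 */
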
#endif // ATOMIC_PRIVATE

#if __arm__
#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
		boolean_t _result = FALSE; uint32_t _err = 0; \
		typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \
		for (;;) { \
			ov = __builtin_arm_ldrex(_p); \
			__VA_ARGS__; \
			if (!_err && memory_order_has_release(memory_order_##m)) { \
				/* only done for the first loop iteration */ \
				atomic_thread_fence(memory_order_release); \
			} \
			_err = __builtin_arm_strex(nv, _p); \
			if (__builtin_expect(!_err, 1)) { \
				if (memory_order_has_acquire(memory_order_##m)) { \
					atomic_thread_fence(memory_order_acquire); \
				} \
				_result = TRUE; \
				break; \
			} \
		} \
		_result; \
	})
#else
#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
		boolean_t _result = FALSE; \
		typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \
		do { \
			if (memory_order_has_acquire(memory_order_##m)) { \
				ov = __builtin_arm_ldaex(_p); \
			} else { \
				ov = __builtin_arm_ldrex(_p); \
			} \
			__VA_ARGS__; \
			if (memory_order_has_release(memory_order_##m)) { \
				_result = !__builtin_arm_stlex(nv, _p); \
			} else { \
				_result = !__builtin_arm_strex(nv, _p); \
			} \
		} while (__builtin_expect(!_result, 0)); \
		_result; \
	})
#endif

#define os_atomic_rmw_loop_give_up(expr) \
	({ __builtin_arm_clrex(); expr; __builtin_trap(); })

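/*
 * Illustrative sketch (not part of this interface; "example_try_retain" and
 * its refcount are hypothetical): os_atomic_rmw_loop() loads the current
 * value into ov, runs the body to compute nv, and retries until the
 * exclusive store succeeds. os_atomic_rmw_loop_give_up() abandons the
 * reservation and must leave the loop, here via return.
 *
 *	static inline boolean_t
 *	example_try_retain(_Atomic uint32_t *refcnt)
 *	{
 *		uint32_t ov, nv;
 *
 *		return os_atomic_rmw_loop(refcnt, ov, nv, acquire, {
 *			if (ov == 0) {
 *				os_atomic_rmw_loop_give_up(return FALSE);
 *			}
 *			nv = ov + 1;
 *		});
 *	}
 */
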
#endif // _ARM_ATOMIC_H_