/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _ARM_ATOMIC_H_
#define _ARM_ATOMIC_H_

// Parameter for __builtin_arm_dmb

// Parameter for __builtin_arm_dsb

// Parameter for __builtin_arm_isb

#if __SMP__

#define memory_order_consume_smp memory_order_consume
#define memory_order_acquire_smp memory_order_acquire
#define memory_order_release_smp memory_order_release
#define memory_order_acq_rel_smp memory_order_acq_rel
#define memory_order_seq_cst_smp memory_order_seq_cst

#else

#define memory_order_consume_smp memory_order_relaxed
#define memory_order_acquire_smp memory_order_relaxed
#define memory_order_release_smp memory_order_relaxed
#define memory_order_acq_rel_smp memory_order_relaxed
#define memory_order_seq_cst_smp memory_order_relaxed

#endif
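
/*
 * Illustrative usage, not part of the original header: the *_smp orders let
 * callers request real ordering only when the kernel is built SMP, e.g.
 *
 *	atomic_store_explicit(&flag, 1, memory_order_release_smp);
 *
 * which degrades to a relaxed store in a uniprocessor configuration.
 */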

/*
 * Atomic operations functions
 *
 * These static functions are designed for inlining.
 * It is expected that the memory_order arguments are
 * known at compile time.  This collapses these
 * functions into a simple atomic operation.
 */

static inline boolean_t
memory_order_has_acquire(enum memory_order ord)
{
	switch (ord) {
	case memory_order_consume:
	case memory_order_acquire:
	case memory_order_acq_rel:
	case memory_order_seq_cst:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline boolean_t
memory_order_has_release(enum memory_order ord)
{
	switch (ord) {
	case memory_order_release:
	case memory_order_acq_rel:
	case memory_order_seq_cst:
		return TRUE;
	default:
		return FALSE;
	}
}
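
/*
 * Illustrative note, not part of the original header: with a compile-time
 * constant order these predicates constant-fold, so a test such as
 *
 *	if (memory_order_has_acquire(memory_order_relaxed)) { ... }
 *
 * disappears entirely, letting the exclusive-access helpers below reduce to
 * a single load or store instruction.
 */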

#ifdef ATOMIC_PRIVATE

#define clear_exclusive()	__builtin_arm_clrex()

__unused static uint32_t
load_exclusive32(uint32_t *target, enum memory_order ord)
{
	uint32_t	value;

#if __arm__
	if (memory_order_has_release(ord)) {
		// Pre-load release barrier
		atomic_thread_fence(memory_order_release);
	}
	value = __builtin_arm_ldrex(target);
#else
	if (memory_order_has_acquire(ord))
		value = __builtin_arm_ldaex(target);	// ldaxr
	else
		value = __builtin_arm_ldrex(target);	// ldxr
#endif
	return value;
}

__unused static boolean_t
store_exclusive32(uint32_t *target, uint32_t value, enum memory_order ord)
{
	boolean_t	err;

#if __arm__
	err = __builtin_arm_strex(value, target);
	if (memory_order_has_acquire(ord)) {
		// Post-store acquire barrier
		atomic_thread_fence(memory_order_acquire);
	}
#else
	if (memory_order_has_release(ord))
		err = __builtin_arm_stlex(value, target);	// stlxr
	else
		err = __builtin_arm_strex(value, target);	// stxr
#endif
	return !err;
}
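
/*
 * Illustrative usage, not part of the original header: the two helpers pair
 * into a load-exclusive/store-exclusive retry loop, e.g. an atomic bit set:
 *
 *	uint32_t ov;
 *	do {
 *		ov = load_exclusive32(addr, memory_order_relaxed);
 *	} while (!store_exclusive32(addr, ov | bit, memory_order_relaxed));
 *
 * where addr and bit stand in for a caller-supplied word and mask.
 */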

__unused static uintptr_t
load_exclusive(uintptr_t *target, enum memory_order ord)
{
#if !__LP64__
	return load_exclusive32((uint32_t *)target, ord);
#else
	uintptr_t	value;

	if (memory_order_has_acquire(ord))
		value = __builtin_arm_ldaex(target);	// ldaxr
	else
		value = __builtin_arm_ldrex(target);	// ldxr
	return value;
#endif
}

__unused static boolean_t
store_exclusive(uintptr_t *target, uintptr_t value, enum memory_order ord)
{
#if !__LP64__
	return store_exclusive32((uint32_t *)target, value, ord);
#else
	boolean_t	err;

	if (memory_order_has_release(ord))
		err = __builtin_arm_stlex(value, target);	// stlxr
	else
		err = __builtin_arm_strex(value, target);	// stxr
	return !err;
#endif
}

__unused static boolean_t
atomic_compare_exchange(uintptr_t *target, uintptr_t oldval, uintptr_t newval,
    enum memory_order orig_ord, boolean_t wait)
{
	enum memory_order	ord = orig_ord;
	uintptr_t		value;

#if __arm__
	ord = memory_order_relaxed;
	if (memory_order_has_release(orig_ord)) {
		atomic_thread_fence(memory_order_release);
	}
#endif
	do {
		value = load_exclusive(target, ord);
		if (value != oldval) {
			if (wait)
				wait_for_event();	// Wait with monitor held
			else
				clear_exclusive();	// Clear exclusive monitor
			return FALSE;
		}
	} while (!store_exclusive(target, newval, ord));
#if __arm__
	if (memory_order_has_acquire(orig_ord)) {
		atomic_thread_fence(memory_order_acquire);
	}
#endif
	return TRUE;
}
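
/*
 * Illustrative usage, not part of the original header: a caller typically
 * re-reads the current value and retries until the exchange succeeds, e.g.
 *
 *	uintptr_t ov, nv;
 *	do {
 *		ov = *counter;
 *		nv = ov + 1;
 *	} while (!atomic_compare_exchange(counter, ov, nv,
 *	    memory_order_acq_rel, FALSE));
 *
 * Passing wait=TRUE instead calls wait_for_event() with the exclusive
 * monitor still held before returning FALSE.
 */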

#endif	// ATOMIC_PRIVATE

#if __arm__

#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
		boolean_t _result = FALSE; uint32_t _err = 0; \
		typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \
		for (;;) { \
			ov = __builtin_arm_ldrex(_p); \
			__VA_ARGS__; \
			if (!_err && memory_order_has_release(memory_order_##m)) { \
				/* only done for the first loop iteration */ \
				atomic_thread_fence(memory_order_release); \
			} \
			_err = __builtin_arm_strex(nv, _p); \
			if (__builtin_expect(!_err, 1)) { \
				if (memory_order_has_acquire(memory_order_##m)) { \
					atomic_thread_fence(memory_order_acquire); \
				} \
				_result = TRUE; \
				break; \
			} \
		} \
		_result; \
	})

#else

#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
		boolean_t _result = FALSE; \
		typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \
		do { \
			if (memory_order_has_acquire(memory_order_##m)) { \
				ov = __builtin_arm_ldaex(_p); \
			} else { \
				ov = __builtin_arm_ldrex(_p); \
			} \
			__VA_ARGS__; \
			if (memory_order_has_release(memory_order_##m)) { \
				_result = !__builtin_arm_stlex(nv, _p); \
			} else { \
				_result = !__builtin_arm_strex(nv, _p); \
			} \
		} while (__builtin_expect(!_result, 0)); \
		_result; \
	})

#endif

#define os_atomic_rmw_loop_give_up(expr) \
	({ __builtin_arm_clrex(); expr; __builtin_trap(); })
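
/*
 * Illustrative usage, not part of the original header: the loop macro binds
 * the observed and new values by name and runs the supplied statement block
 * between the exclusive load and store, e.g.
 *
 *	_Atomic uint32_t refcnt;
 *	uint32_t ov, nv;
 *	os_atomic_rmw_loop(&refcnt, ov, nv, acq_rel, {
 *		if (ov == 0) {
 *			os_atomic_rmw_loop_give_up(return FALSE);
 *		}
 *		nv = ov + 1;
 *	});
 *
 * os_atomic_rmw_loop_give_up() clears the exclusive monitor before taking
 * the early exit, so the loop is never abandoned with a reservation pending.
 */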

#endif // _ARM_ATOMIC_H_