/* Source: libkern/os/atomic_private.h (xnu-7195.50.7.100.1) */
/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29#ifndef __OS_ATOMIC_PRIVATE_H__
30#define __OS_ATOMIC_PRIVATE_H__
31
/*!
 * @file <os/atomic_private.h>
 *
 * @brief
 * This file defines nicer (terser and safer) wrappers for C11's <stdatomic.h>.
 *
 * @discussion
 * @see xnu.git::doc/atomics.md which provides more extensive documentation
 * about this header.
 *
 * Note that some of the macros defined in this file may be overridden by
 * architecture specific headers.
 *
 * All the os_atomic* functions take an operation ordering argument that can be:
 * - C11 memory orders: relaxed, acquire, release, acq_rel or seq_cst which
 *   imply a memory fence on SMP machines, and always carry the matching
 *   compiler barrier semantics.
 *
 * - the os_atomic-specific `dependency` memory ordering that is used to
 *   document intent to carry a data or address dependency.
 *   See doc/atomics.md for more information.
 *
 * - a compiler barrier: compiler_acquire, compiler_release, compiler_acq_rel
 *   without a corresponding memory fence.
 */
57
58#include <os/atomic.h>
59
/*!
 * @group <os/atomic_private.h> tunables.
 *
 * @{
 *
 * @brief
 * @c OS_ATOMIC_CONFIG_* macros provide tunables for clients.
 */
68
/*!
 * @macro OS_ATOMIC_CONFIG_SMP
 *
 * @brief
 * Whether this is used on an SMP system, defaults to 1.
 *
 * @discussion
 * When 0, the memory fences normally implied by the `*_smp` orderings are
 * elided (see os_atomic_thread_fence below); the compiler barriers remain.
 */
#ifndef OS_ATOMIC_CONFIG_SMP
#define OS_ATOMIC_CONFIG_SMP 1
#endif // OS_ATOMIC_CONFIG_SMP
78
/*!
 * @macro OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY
 *
 * @brief
 * Hide interfaces that can lead to starvation on certain hardware/build
 * configurations.
 *
 * @discussion
 * The following ABIs are currently supported by os_atomic:
 * - i386 and x86_64: Intel atomics
 * - armv7: load/store exclusive
 * - armv8: load/store exclusive
 * - armv8.1: armv8.1 style atomics
 *
 * On armv8 hardware with asymmetric cores, using load/store exclusive based
 * atomics can lead to starvation in very hot code or non-preemptible context,
 * and code that is sensitive to such must not use these interfaces.
 *
 * When OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY is set, any os_atomic_* interface
 * that may cause starvation will be made unavailable to avoid accidental use.
 *
 * Defaults:
 * - XNU: builds per SoC, already safe
 * - Kexts: avoid starvable interfaces by default
 * - User: allow starvable interfaces by default
 *
 * Note: at this time, on Apple supported platforms, the only configuration
 * that is affected by this would be for the "arm64" slices.
 *
 * Intel, armv7 variants, and the arm64e slice always are unaffected.
 */
#ifndef OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY
#if XNU_KERNEL_PRIVATE
#define OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY 0 /* XNU builds per SoC, already safe */
#elif KERNEL
#define OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY 1 /* kexts: starvable interfaces hidden */
#else
#define OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY 0 /* userspace: starvable interfaces allowed */
#endif
#endif // OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY
119
/*!
 * @macro OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY
 *
 * @brief
 * Expose the os_atomic-specific fake `dependency` memory ordering.
 *
 * @discussion
 * The dependency ordering can be used to try to "repair" C11's consume ordering
 * and should be limited to extremely complex algorithms where every cycle counts.
 *
 * Due to the inherent risks (no compiler support) for this feature, it is
 * reserved for expert and very domain-specific code only and is off by default.
 *
 * Default: 0 (only XNU itself turns it on)
 */
#ifndef OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY
#if XNU_KERNEL_PRIVATE
#define OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY 1
#else
#define OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY 0
#endif
#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY
142
/*! @} */

/*!
 * @group <os/atomic_private.h> features (arch specific).
 *
 * @{
 *
 * @brief
 * The @c OS_ATOMIC_USE_* and @c OS_ATOMIC_HAS_* defines expose some
 * specificities of <os/atomic_private.h> implementation that are relevant to
 * certain clients and can be used to conditionalize code.
 */
155
/*!
 * @const OS_ATOMIC_HAS_LLSC
 *
 * @brief
 * Whether the platform has LL/SC (load-linked / store-conditional) features.
 *
 * @discussion
 * When set, the os_atomic_*_exclusive() macros are defined.
 */
#if defined(__i386__) || defined(__x86_64__)
#define OS_ATOMIC_HAS_LLSC 0
#elif defined(__arm__) || defined(__arm64__)
#define OS_ATOMIC_HAS_LLSC 1
#else
#error unsupported architecture
#endif
172
/*!
 * @const OS_ATOMIC_USE_LLSC
 *
 * @brief
 * Whether os_atomic* use LL/SC internally.
 *
 * @discussion
 * OS_ATOMIC_USE_LLSC implies OS_ATOMIC_HAS_LLSC.
 *
 * NOTE(review): the probe below keys off __ARM_ARCH_8_2__ even though native
 * atomics are described elsewhere in this header as an armv8.1 feature —
 * presumably all Apple targets with the native atomics predefine this macro;
 * confirm against the toolchain's predefined macros before relying on it.
 */
#if defined(__arm64__) && defined(__ARM_ARCH_8_2__)
#define OS_ATOMIC_USE_LLSC 0
#else
#define OS_ATOMIC_USE_LLSC OS_ATOMIC_HAS_LLSC
#endif
187
/*!
 * @const OS_ATOMIC_HAS_STARVATION_FREE_RMW
 *
 * @brief
 * Whether os_atomic* Read-Modify-Write operations are starvation free
 * in the current configuration.
 *
 * @discussion
 * RMW operations are starvation free exactly when they do not rely on an
 * LL/SC retry loop (i.e. when OS_ATOMIC_USE_LLSC is 0).
 */
#define OS_ATOMIC_HAS_STARVATION_FREE_RMW (!OS_ATOMIC_USE_LLSC)
196
/*! @} */
198
199#include "atomic_private_impl.h" // Internal implementation details
200
/*!
 * @function os_compiler_barrier
 *
 * @brief
 * Provide a compiler barrier according to the specified ordering.
 *
 * @param m
 * An optional ordering among `acquire`, `release` or `acq_rel` which defaults
 * to `acq_rel` when not specified.
 * These are equivalent to the `compiler_acquire`, `compiler_release` and
 * `compiler_acq_rel` orderings taken by the os_atomic* functions.
 */
#undef os_compiler_barrier
/* `b` may be empty: token pasting then selects the default ordering token */
#define os_compiler_barrier(b...) \
	os_atomic_std(atomic_signal_fence)(_os_compiler_barrier_##b)
216
/*!
 * @function os_atomic_thread_fence
 *
 * @brief
 * Memory fence which is elided in non-SMP mode, but always carries the
 * corresponding compiler barrier.
 *
 * @param m
 * The ordering for this fence.
 */
#define os_atomic_thread_fence(m) ({ \
	/* hardware fence, downgraded to relaxed in non-SMP builds */ \
	os_atomic_std(atomic_thread_fence)(_os_atomic_mo_##m##_smp); \
	/* compiler barrier at the requested ordering, always emitted */ \
	os_atomic_std(atomic_signal_fence)(_os_atomic_mo_##m); \
})
231
/*!
 * @function os_atomic_init
 *
 * @brief
 * Wrapper for C11 atomic_init()
 *
 * @discussion
 * This initialization is not performed atomically, and so must only be used as
 * part of object initialization before the object is made visible to other
 * threads/cores.
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to initialize the variable with.
 *
 * @returns
 * The value loaded from @a p.
 * NOTE(review): C11 atomic_init() is declared to return void, so this
 * expression likely has no usable value — confirm before using the result.
 */
#define os_atomic_init(p, v) \
	os_atomic_std(atomic_init)(os_cast_to_atomic_pointer(p), v)
254
/*!
 * @function os_atomic_load_is_plain, os_atomic_store_is_plain
 *
 * @brief
 * Return whether a relaxed atomic load (resp. store) to an atomic variable
 * is implemented as a single plain load (resp. store) instruction.
 *
 * @discussion
 * Non-relaxed loads/stores may involve additional memory fence instructions
 * or more complex atomic instructions.
 *
 * This is a construct that can safely be used in static asserts.
 *
 * This doesn't check for alignment and it is assumed that `p` is
 * "aligned enough".
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @returns
 * True when relaxed atomic loads (resp. stores) compile to a plain load
 * (resp. store) instruction, false otherwise.
 */
/* a value no wider than a pointer can be moved with one instruction */
#define os_atomic_load_is_plain(p) (sizeof(*(p)) <= sizeof(void *))
#define os_atomic_store_is_plain(p) os_atomic_load_is_plain(p)
280
/*!
 * @function os_atomic_load
 *
 * @brief
 * Wrapper for C11 atomic_load_explicit(), guaranteed to compile to a single
 * plain load instruction (when @a m is `relaxed`).
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * The value loaded from @a p.
 */
#define os_atomic_load(p, m) ({ \
	/* reject wide types at compile time; use os_atomic_load_wide instead */ \
	_Static_assert(os_atomic_load_is_plain(p), "Load is wide"); \
	_os_compiler_barrier_before_atomic(m); \
	__auto_type _r = os_atomic_std(atomic_load_explicit)( \
	    os_cast_to_atomic_pointer(p), _os_atomic_mo_##m##_smp); \
	_os_compiler_barrier_after_atomic(m); \
	_r; \
})
305
/*!
 * @function os_atomic_store
 *
 * @brief
 * Wrapper for C11 atomic_store_explicit(), guaranteed to compile to a single
 * plain store instruction (when @a m is `relaxed`).
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to store.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * The value stored at @a p.
 */
#define os_atomic_store(p, v, m) ({ \
	/* reject wide types at compile time; use os_atomic_store_wide instead */ \
	_Static_assert(os_atomic_store_is_plain(p), "Store is wide"); \
	__auto_type _v = (v); \
	_os_compiler_barrier_before_atomic(m); \
	os_atomic_std(atomic_store_explicit)(os_cast_to_atomic_pointer(p), _v, \
	    _os_atomic_mo_##m##_smp); \
	_os_compiler_barrier_after_atomic(m); \
	_v; \
})
334
/* Everything below (until the matching #else) may starve under LL/SC. */
#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY

/*!
 * @function os_atomic_load_wide
 *
 * @brief
 * Wrapper for C11 atomic_load_explicit(), which may be implemented by a
 * compare-exchange loop for double-wide variables.
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * The value loaded from @a p.
 */
#define os_atomic_load_wide(p, m) ({ \
	_os_compiler_barrier_before_atomic(m); \
	__auto_type _r = os_atomic_std(atomic_load_explicit)( \
	    os_cast_to_atomic_pointer(p), _os_atomic_mo_##m##_smp); \
	_os_compiler_barrier_after_atomic(m); \
	_r; \
})
360
/*!
 * @function os_atomic_store_wide
 *
 * @brief
 * Wrapper for C11 atomic_store_explicit(), which may be implemented by a
 * compare-exchange loop for double-wide variables.
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to store.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * The value stored at @a p.
 */
#define os_atomic_store_wide(p, v, m) ({ \
	__auto_type _v = (v); \
	_os_compiler_barrier_before_atomic(m); \
	os_atomic_std(atomic_store_explicit)(os_cast_to_atomic_pointer(p), _v, \
	    _os_atomic_mo_##m##_smp); \
	_os_compiler_barrier_after_atomic(m); \
	_v; \
})
388
/*!
 * @function os_atomic_add, os_atomic_add_orig
 *
 * @brief
 * Wrappers for C11 atomic_fetch_add_explicit().
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to add.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_add_orig returns the value of the variable before the atomic add,
 * os_atomic_add returns the value of the variable after the atomic add.
 */
#define os_atomic_add_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_add)
#define os_atomic_add(p, v, m) _os_atomic_c11_op(p, v, m, fetch_add, +)

/*!
 * @function os_atomic_inc, os_atomic_inc_orig
 *
 * @brief
 * Perform an atomic increment (an add of 1).
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_inc_orig returns the value of the variable before the atomic increment,
 * os_atomic_inc returns the value of the variable after the atomic increment.
 */
#define os_atomic_inc_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_add)
#define os_atomic_inc(p, m) _os_atomic_c11_op(p, 1, m, fetch_add, +)
429
/*!
 * @function os_atomic_sub, os_atomic_sub_orig
 *
 * @brief
 * Wrappers for C11 atomic_fetch_sub_explicit().
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to subtract.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_sub_orig returns the value of the variable before the atomic subtract,
 * os_atomic_sub returns the value of the variable after the atomic subtract.
 */
#define os_atomic_sub_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_sub)
#define os_atomic_sub(p, v, m) _os_atomic_c11_op(p, v, m, fetch_sub, -)

/*!
 * @function os_atomic_dec, os_atomic_dec_orig
 *
 * @brief
 * Perform an atomic decrement (a subtract of 1).
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_dec_orig returns the value of the variable before the atomic decrement,
 * os_atomic_dec returns the value of the variable after the atomic decrement.
 */
#define os_atomic_dec_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_sub)
#define os_atomic_dec(p, m) _os_atomic_c11_op(p, 1, m, fetch_sub, -)
470
/*!
 * @function os_atomic_and, os_atomic_and_orig
 *
 * @brief
 * Wrappers for C11 atomic_fetch_and_explicit().
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to and.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_and_orig returns the value of the variable before the atomic and,
 * os_atomic_and returns the value of the variable after the atomic and.
 */
#define os_atomic_and_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_and)
#define os_atomic_and(p, v, m) _os_atomic_c11_op(p, v, m, fetch_and, &)

/*!
 * @function os_atomic_andnot, os_atomic_andnot_orig
 *
 * @brief
 * Wrappers for C11 atomic_fetch_and_explicit(p, ~value), i.e. bit clearing.
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value whose complement to and.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_andnot_orig returns the value of the variable before the atomic andnot,
 * os_atomic_andnot returns the value of the variable after the atomic andnot.
 */
/* the (typeof(v)) cast keeps ~ from promoting narrow operands to int */
#define os_atomic_andnot_orig(p, v, m) _os_atomic_c11_op_orig(p, (typeof(v))~(v), m, fetch_and)
#define os_atomic_andnot(p, v, m) _os_atomic_c11_op(p, (typeof(v))~(v), m, fetch_and, &)
514
/*!
 * @function os_atomic_or, os_atomic_or_orig
 *
 * @brief
 * Wrappers for C11 atomic_fetch_or_explicit().
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to or.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_or_orig returns the value of the variable before the atomic or,
 * os_atomic_or returns the value of the variable after the atomic or.
 */
#define os_atomic_or_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_or)
#define os_atomic_or(p, v, m) _os_atomic_c11_op(p, v, m, fetch_or, |)

/*!
 * @function os_atomic_xor, os_atomic_xor_orig
 *
 * @brief
 * Wrappers for C11 atomic_fetch_xor_explicit().
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to xor.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_xor_orig returns the value of the variable before the atomic xor,
 * os_atomic_xor returns the value of the variable after the atomic xor.
 */
#define os_atomic_xor_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_xor)
#define os_atomic_xor(p, v, m) _os_atomic_c11_op(p, v, m, fetch_xor, ^)
558
/*!
 * @function os_atomic_min, os_atomic_min_orig
 *
 * @brief
 * Wrappers for Clang's __atomic_fetch_min() (a Clang builtin, not C11).
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to minimize.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_min_orig returns the value of the variable before the atomic min,
 * os_atomic_min returns the value of the variable after the atomic min.
 */
#define os_atomic_min_orig(p, v, m) _os_atomic_clang_op_orig(p, v, m, fetch_min)
#define os_atomic_min(p, v, m) _os_atomic_clang_op(p, v, m, fetch_min, MIN)

/*!
 * @function os_atomic_max, os_atomic_max_orig
 *
 * @brief
 * Wrappers for Clang's __atomic_fetch_max() (a Clang builtin, not C11).
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to maximize.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * os_atomic_max_orig returns the value of the variable before the atomic max,
 * os_atomic_max returns the value of the variable after the atomic max.
 */
#define os_atomic_max_orig(p, v, m) _os_atomic_clang_op_orig(p, v, m, fetch_max)
#define os_atomic_max(p, v, m) _os_atomic_clang_op(p, v, m, fetch_max, MAX)
602
/*!
 * @function os_atomic_xchg
 *
 * @brief
 * Wrapper for C11 atomic_exchange_explicit().
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param v
 * The value to exchange with.
 *
 * @param m
 * The ordering to use.
 *
 * @returns
 * The value of the variable before the exchange.
 */
#define os_atomic_xchg(p, v, m) _os_atomic_c11_op_orig(p, v, m, exchange)
622
/*!
 * @function os_atomic_cmpxchg
 *
 * @brief
 * Wrapper for C11 atomic_compare_exchange_strong_explicit().
 *
 * @discussion
 * Loops around os_atomic_cmpxchg() may want to consider using the
 * os_atomic_rmw_loop() construct instead to take advantage of the C11 weak
 * compare-exchange operation.
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param e
 * The value expected in the atomic variable.
 *
 * @param v
 * The value to store if the atomic variable has the expected value @a e.
 *
 * @param m
 * The ordering to use in case of success.
 * The ordering in case of failure is always `relaxed`.
 *
 * @returns
 * 0 if the compare-exchange failed.
 * 1 if the compare-exchange succeeded.
 */
#define os_atomic_cmpxchg(p, e, v, m) ({ \
	/* _r holds the expected value; C11 overwrites it on failure */ \
	os_atomic_basetypeof(p) _r = (e); int _b; \
	_os_compiler_barrier_before_atomic(m); \
	_b = os_atomic_std(atomic_compare_exchange_strong_explicit)( \
	    os_cast_to_atomic_pointer(p), &_r, \
	    _os_atomic_value_cast(p, v), \
	    _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \
	_os_compiler_barrier_after_atomic(m); \
	_b; \
})
661
/*!
 * @function os_atomic_cmpxchgv
 *
 * @brief
 * Wrapper for C11 atomic_compare_exchange_strong_explicit().
 *
 * @discussion
 * Loops around os_atomic_cmpxchgv() may want to consider using the
 * os_atomic_rmw_loop() construct instead to take advantage of the C11 weak
 * compare-exchange operation.
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param e
 * The value expected in the atomic variable.
 *
 * @param v
 * The value to store if the atomic variable has the expected value @a e.
 *
 * @param g
 * A pointer to a location that is filled with the value that was present in
 * the atomic variable before the compare-exchange (whether successful or not).
 * This can be used to redrive compare-exchange loops.
 *
 * @param m
 * The ordering to use in case of success.
 * The ordering in case of failure is always `relaxed`.
 *
 * @returns
 * 0 if the compare-exchange failed.
 * 1 if the compare-exchange succeeded.
 */
#define os_atomic_cmpxchgv(p, e, v, g, m) ({ \
	os_atomic_basetypeof(p) _r = (e); int _b; \
	_os_compiler_barrier_before_atomic(m); \
	_b = os_atomic_std(atomic_compare_exchange_strong_explicit)( \
	    os_cast_to_atomic_pointer(p), &_r, \
	    _os_atomic_value_cast(p, v), \
	    _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \
	_os_compiler_barrier_after_atomic(m); \
	/* report the observed value (expected on success, actual on failure) */ \
	*(g) = _r; _b; \
})
705
/*!
 * @function os_atomic_rmw_loop
 *
 * @brief
 * Advanced read-modify-write construct to wrap compare-exchange loops.
 *
 * @param p
 * A pointer to an atomic variable to be modified.
 *
 * @param ov
 * The name of the variable that will contain the original value of the atomic
 * variable (reloaded every iteration of the loop).
 *
 * @param nv
 * The name of the variable that will contain the new value to compare-exchange
 * the atomic variable to (typically computed from @a ov every iteration of the
 * loop).
 *
 * @param m
 * The ordering to use in case of success.
 * The ordering in case of failure is always `relaxed`.
 *
 * @param ...
 * Code block that validates the value of @p ov and computes the new value of
 * @p nv that the atomic variable will be compare-exchanged to in an iteration
 * of the loop.
 *
 * The loop can be aborted using os_atomic_rmw_loop_give_up(), e.g. when the
 * value of @p ov is found to be "invalid" for the overall operation.
 * `continue` cannot be used in this context.
 *
 * No stores to memory should be performed within the code block as it may cause
 * LL/SC transactions used to implement compare-exchange to fail persistently.
 *
 * @returns
 * 0 if the loop was aborted with os_atomic_rmw_loop_give_up().
 * 1 if the loop completed.
 */
#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
	int _result = 0; \
	__auto_type _p = os_cast_to_nonatomic_pointer(p); \
	_os_compiler_barrier_before_atomic(m); \
	/* seed ov with a plain load; the weak cmpxchg refreshes it on failure */ \
	ov = *_p; \
	do { \
		__VA_ARGS__; \
		_result = os_atomic_std(atomic_compare_exchange_weak_explicit)( \
		    os_cast_to_atomic_pointer(_p), &ov, nv, \
		    _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \
	} while (__builtin_expect(!_result, 0)); \
	_os_compiler_barrier_after_atomic(m); \
	_result; \
})
758
/*!
 * @function os_atomic_rmw_loop_give_up
 *
 * @brief
 * Abort an os_atomic_rmw_loop() loop (leaving _result at 0).
 *
 * @param ...
 * Optional code block to execute before the `break` out of the loop. May
 * further alter the control flow (e.g. using `return`, `goto`, ...).
 */
#define os_atomic_rmw_loop_give_up(...) ({ __VA_ARGS__; break; })
770
#else // !OS_ATOMIC_HAS_STARVATION_FREE_RMW && OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY

/*
 * Starvation-free-only configuration: replace every potentially starvable
 * interface with a stub that fails at compile time with a descriptive
 * message, so accidental uses are caught at build rather than at runtime.
 */
#define _os_atomic_error_is_starvable(name) \
	_Static_assert(0, #name " is not starvation-free and isn't available in this configuration")
#define os_atomic_load_wide(p, m) _os_atomic_error_is_starvable(os_atomic_load_wide)
#define os_atomic_store_wide(p, v, m) _os_atomic_error_is_starvable(os_atomic_store_wide)
#define os_atomic_add_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_add_orig)
#define os_atomic_add(p, v, m) _os_atomic_error_is_starvable(os_atomic_add)
#define os_atomic_inc_orig(p, m) _os_atomic_error_is_starvable(os_atomic_inc_orig)
#define os_atomic_inc(p, m) _os_atomic_error_is_starvable(os_atomic_inc)
#define os_atomic_sub_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_sub_orig)
#define os_atomic_sub(p, v, m) _os_atomic_error_is_starvable(os_atomic_sub)
#define os_atomic_dec_orig(p, m) _os_atomic_error_is_starvable(os_atomic_dec_orig)
#define os_atomic_dec(p, m) _os_atomic_error_is_starvable(os_atomic_dec)
#define os_atomic_and_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_and_orig)
#define os_atomic_and(p, v, m) _os_atomic_error_is_starvable(os_atomic_and)
#define os_atomic_andnot_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_andnot_orig)
#define os_atomic_andnot(p, v, m) _os_atomic_error_is_starvable(os_atomic_andnot)
#define os_atomic_or_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_or_orig)
#define os_atomic_or(p, v, m) _os_atomic_error_is_starvable(os_atomic_or)
#define os_atomic_xor_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_xor_orig)
#define os_atomic_xor(p, v, m) _os_atomic_error_is_starvable(os_atomic_xor)
#define os_atomic_min_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_min_orig)
#define os_atomic_min(p, v, m) _os_atomic_error_is_starvable(os_atomic_min)
#define os_atomic_max_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_max_orig)
#define os_atomic_max(p, v, m) _os_atomic_error_is_starvable(os_atomic_max)
#define os_atomic_xchg(p, v, m) _os_atomic_error_is_starvable(os_atomic_xchg)
#define os_atomic_cmpxchg(p, e, v, m) _os_atomic_error_is_starvable(os_atomic_cmpxchg)
#define os_atomic_cmpxchgv(p, e, v, g, m) _os_atomic_error_is_starvable(os_atomic_cmpxchgv)
#define os_atomic_rmw_loop(p, ov, nv, m, ...) _os_atomic_error_is_starvable(os_atomic_rmw_loop)
#define os_atomic_rmw_loop_give_up(...) _os_atomic_error_is_starvable(os_atomic_rmw_loop_give_up)

#endif // !OS_ATOMIC_HAS_STARVATION_FREE_RMW && OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY
804
#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY

/*!
 * @typedef os_atomic_dependency_t
 *
 * @brief
 * Type for dependency tokens that can be derived from loads with dependency
 * and injected into various expressions.
 *
 * @warning
 * The implementation of atomic dependencies makes painstakingly sure that the
 * compiler doesn't know that os_atomic_dependency_t::__opaque_zero is always 0.
 *
 * Users of os_atomic_dependency_t MUST NOT test its value (even with an
 * assert), as doing so would allow the compiler to reason about the value and
 * elide its use to inject hardware dependencies (thwarting the entire purpose
 * of the construct).
 */
typedef struct { unsigned long __opaque_zero; } os_atomic_dependency_t;
824
/*!
 * @const OS_ATOMIC_DEPENDENCY_NONE
 *
 * @brief
 * A value to pass to functions that can carry dependencies, to indicate that
 * no dependency should be carried.
 */
#define OS_ATOMIC_DEPENDENCY_NONE \
	((os_atomic_dependency_t){ 0UL })
834
/*!
 * @function os_atomic_make_dependency
 *
 * @brief
 * Create a dependency token that can be injected into expressions to force a
 * hardware dependency.
 *
 * @discussion
 * This function is only useful for cases where the dependency needs to be used
 * several times.
 *
 * os_atomic_load_with_dependency_on() and os_atomic_inject_dependency() are
 * otherwise capable of automatically creating dependency tokens.
 *
 * @param v
 * The result of:
 * - an os_atomic_load(..., dependency),
 * - an os_atomic_inject_dependency(),
 * - an os_atomic_load_with_dependency_on().
 *
 * Note that due to implementation limitations, the type of @p v must be
 * register-sized, if necessary an explicit cast is required.
 *
 * @returns
 * An os_atomic_dependency_t token that can be used to prolongate dependency
 * chains.
 *
 * The token value is always 0, but the compiler must never be able to reason
 * about that fact (c.f. os_atomic_dependency_t)
 */
/* default (portable) definition; architecture headers may override it */
#define os_atomic_make_dependency(v) \
	((void)(v), OS_ATOMIC_DEPENDENCY_NONE)
867
/*!
 * @function os_atomic_inject_dependency
 *
 * @brief
 * Inject a hardware dependency resulting from a `dependency` load into a
 * specified pointer.
 *
 * @param p
 * A pointer to inject the dependency into.
 *
 * @param e
 * - a dependency token returned from os_atomic_make_dependency(),
 *
 * - OS_ATOMIC_DEPENDENCY_NONE, which turns this operation into a no-op,
 *
 * - any value accepted by os_atomic_make_dependency().
 *
 * @returns
 * A value equal to @a p but that prolongates the dependency chain rooted at
 * @a e.
 */
/* adds the (always-zero, but opaque) token to `p` so the hardware sees a
 * genuine address dependency from the originating load */
#define os_atomic_inject_dependency(p, e) \
	((typeof(*(p)) *)((p) + _os_atomic_auto_dependency(e).__opaque_zero))
891
/*!
 * @function os_atomic_load_with_dependency_on
 *
 * @brief
 * Load that prolongates the dependency chain rooted at `v`.
 *
 * @discussion
 * This is shorthand for:
 *
 * <code>
 * os_atomic_load(os_atomic_inject_dependency(p, e), dependency)
 * </code>
 *
 * @param p
 * A pointer to an atomic variable.
 *
 * @param e
 * - a dependency token returned from os_atomic_make_dependency(),
 *
 * - OS_ATOMIC_DEPENDENCY_NONE, which turns this operation into a no-op,
 *
 * - any value accepted by os_atomic_make_dependency().
 *
 * @returns
 * The value loaded from @a p.
 */
#define os_atomic_load_with_dependency_on(p, e) \
	os_atomic_load(os_atomic_inject_dependency(p, e), dependency)

#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY

#include "atomic_private_arch.h" // Per architecture overrides

#endif /* __OS_ATOMIC_PRIVATE_H__ */