X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d26ffc64f583ab2d29df48f13518685602bc8832..d9a64523371fa019c4575bb400cbbc3a50ac9903:/osfmk/machine/atomic.h

diff --git a/osfmk/machine/atomic.h b/osfmk/machine/atomic.h
index b200f9363..c9aeda0ea 100644
--- a/osfmk/machine/atomic.h
+++ b/osfmk/machine/atomic.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2015 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
@@ -31,6 +31,81 @@
 
 #include <stdatomic.h>
 
+#define _os_atomic_c11_atomic(p) \
+        ((typeof(*(p)) _Atomic *)(p))
+
+#define _os_atomic_basetypeof(p) \
+        typeof(atomic_load(((typeof(*(p)) _Atomic *)(p))))
+
+#define _os_atomic_c11_op_orig(p, v, m, o) \
+        atomic_##o##_explicit(_os_atomic_c11_atomic(p), v, \
+        memory_order_##m)
+
+#define _os_atomic_c11_op(p, v, m, o, op) \
+        ({ typeof(v) _v = (v); _os_atomic_c11_op_orig(p, v, m, o) op _v; })
+
+#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m)
+
+#define os_atomic_load(p, m) \
+        atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m)
+#define os_atomic_store(p, v, m) _os_atomic_c11_op_orig(p, v, m, store)
+
+#define os_atomic_add_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_add)
+#define os_atomic_add(p, v, m) _os_atomic_c11_op(p, v, m, fetch_add, +)
+
+#define os_atomic_inc_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_add)
+#define os_atomic_inc(p, m) _os_atomic_c11_op(p, 1, m, fetch_add, +)
+
+#define os_atomic_sub_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_sub)
+#define os_atomic_sub(p, v, m) _os_atomic_c11_op(p, v, m, fetch_sub, -)
+
+#define os_atomic_dec_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_sub)
+#define os_atomic_dec(p, m) _os_atomic_c11_op(p, 1, m, fetch_sub, -)
+
+#define os_atomic_and_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_and)
+#define os_atomic_and(p, v, m) _os_atomic_c11_op(p, v, m, fetch_and, &)
+
+#define os_atomic_or_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_or)
+#define os_atomic_or(p, v, m) _os_atomic_c11_op(p, v, m, fetch_or, |)
+
+#define os_atomic_xor_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_xor)
+#define os_atomic_xor(p, v, m) _os_atomic_c11_op(p, v, m, fetch_xor, ^)
+
+#define os_atomic_xchg(p, v, m) _os_atomic_c11_op_orig(p, v, m, exchange)
+
+#define os_atomic_cmpxchg(p, e, v, m) \
+        ({ _os_atomic_basetypeof(p) _r = (e); \
+        atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
+        &_r, v, memory_order_##m, memory_order_relaxed); })
+#define os_atomic_cmpxchgv(p, e, v, g, m) \
+        ({ _os_atomic_basetypeof(p) _r = (e); int _b = \
+        atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
+        &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; })
+#define os_atomic_cmpxchgvw(p, e, v, g, m) \
+        ({ _os_atomic_basetypeof(p) _r = (e); int _b = \
+        atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \
+        &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; })
+
+#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \
+        bool _result = false; \
+        typeof(p) _p = (p); \
+        ov = os_atomic_load(_p, relaxed); \
+        do { \
+                __VA_ARGS__; \
+                _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
+        } while (!_result); \
+        _result; \
+    })
+
+#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
+        ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
+#define os_atomic_rmw_loop_give_up(expr) \
+        os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)
+
+#define os_atomic_force_dependency_on(p, e) (p)
+#define os_atomic_load_with_dependency_on(p, e) \
+        os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed)
+
 #if defined (__x86_64__)
 #include "i386/atomic.h"
 #elif defined (__arm__) || defined (__arm64__)
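For context on how the added macros are meant to be used: the *_orig variants return the value observed before the operation, the unsuffixed variants return the value after it, and os_atomic_rmw_loop wraps an arbitrary update expression in a compare-and-swap retry loop, with os_atomic_rmw_loop_give_up as the early-exit path. The sketch below is illustrative only and is not part of this diff; the include path <machine/atomic.h>, the function names my_retain/my_release, and the MY_REFCNT_MAX saturation value are assumptions made for the example.

/*
 * Usage sketch (not part of this change): a hypothetical saturating
 * reference count built on the os_atomic_* macros added above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <machine/atomic.h>        /* assumed include path for the header above */

#define MY_REFCNT_MAX 0x7fffffffu  /* hypothetical saturation limit */

static inline bool
my_retain(uint32_t *refcnt)
{
        uint32_t ov, nv;

        /*
         * os_atomic_rmw_loop loads *refcnt into ov, runs the block to
         * compute nv, and retries the weak CAS until it succeeds.
         */
        os_atomic_rmw_loop(refcnt, ov, nv, acquire, {
                if (ov == 0 || ov == MY_REFCNT_MAX) {
                        /* dead or saturated: leave the count alone and bail out */
                        os_atomic_rmw_loop_give_up(return false);
                }
                nv = ov + 1;
        });
        return true;
}

static inline uint32_t
my_release(uint32_t *refcnt)
{
        /* os_atomic_dec returns the new (decremented) value */
        return os_atomic_dec(refcnt, release);
}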